# coding: utf-8
"""
Looker API 3.0 Reference
### Authorization

The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization)

### Client SDKs

The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks)

### Try It Out!

The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the "Try It Out!" buttons to call the API directly from the documentation page to interactively explore API features and responses.

### Versioning

Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as "beta" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class LDAPUserAttributeRead(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, required=None, user_attributes=None, url=None, can=None):
"""
LDAPUserAttributeRead - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'required': 'bool',
'user_attributes': 'list[UserAttribute]',
'url': 'str',
'can': 'dict(str, bool)'
}
self.attribute_map = {
'name': 'name',
'required': 'required',
'user_attributes': 'user_attributes',
'url': 'url',
'can': 'can'
}
self._name = name
self._required = required
self._user_attributes = user_attributes
self._url = url
self._can = can
@property
def name(self):
"""
Gets the name of this LDAPUserAttributeRead.
Name of User Attribute in LDAP
:return: The name of this LDAPUserAttributeRead.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this LDAPUserAttributeRead.
Name of User Attribute in LDAP
:param name: The name of this LDAPUserAttributeRead.
:type: str
"""
self._name = name
@property
def required(self):
"""
Gets the required of this LDAPUserAttributeRead.
Required to be in LDAP assertion for login to be allowed to succeed
:return: The required of this LDAPUserAttributeRead.
:rtype: bool
"""
return self._required
@required.setter
def required(self, required):
"""
Sets the required of this LDAPUserAttributeRead.
Required to be in LDAP assertion for login to be allowed to succeed
:param required: The required of this LDAPUserAttributeRead.
:type: bool
"""
self._required = required
@property
def user_attributes(self):
"""
Gets the user_attributes of this LDAPUserAttributeRead.
Looker User Attributes
:return: The user_attributes of this LDAPUserAttributeRead.
:rtype: list[UserAttribute]
"""
return self._user_attributes
@user_attributes.setter
def user_attributes(self, user_attributes):
"""
Sets the user_attributes of this LDAPUserAttributeRead.
Looker User Attributes
:param user_attributes: The user_attributes of this LDAPUserAttributeRead.
:type: list[UserAttribute]
"""
self._user_attributes = user_attributes
@property
def url(self):
"""
Gets the url of this LDAPUserAttributeRead.
Link to LDAP config
:return: The url of this LDAPUserAttributeRead.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""
Sets the url of this LDAPUserAttributeRead.
Link to LDAP config
:param url: The url of this LDAPUserAttributeRead.
:type: str
"""
self._url = url
@property
def can(self):
"""
Gets the can of this LDAPUserAttributeRead.
Operations the current user is able to perform on this object
:return: The can of this LDAPUserAttributeRead.
:rtype: dict(str, bool)
"""
return self._can
@can.setter
def can(self, can):
"""
Sets the can of this LDAPUserAttributeRead.
Operations the current user is able to perform on this object
:param can: The can of this LDAPUserAttributeRead.
:type: dict(str, bool)
"""
self._can = can
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, LDAPUserAttributeRead):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
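# A minimal usage sketch (illustrative, not part of the generated code):
#
#     attr = LDAPUserAttributeRead(name="mail", required=True, user_attributes=[])
#     print(attr.to_str())  # pformat of to_dict(): all five attributes, unset ones as None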
import sys
sys.path.append('../')
import re
from pyquery import PyQuery as pq  # third-party: pip install pyquery
from lxml import etree  # third-party: pip install lxml
from bs4 import BeautifulSoup  # third-party: pip install beautifulsoup4
import json
from ADC_function import *
from WebCrawler import javbus
'''
API endpoints
Sign up:  https://www.airav.wiki/api/auth/signup
Settings: https://www.airav.wiki/api/get_web_settings
Search:   https://www.airav.wiki/api/video/list?lng=zh-CN&search=
Search:   https://www.airav.wiki/api/video/list?lang=zh-TW&lng=zh-TW&search=
'''
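# A minimal sketch of querying the search endpoint directly (get_html comes
# from ADC_function; the keyword and page values are illustrative):
#
#     raw = get_html(host + '/api/video/list?lang=zh-TW&lng=jp&search=ADN-188&page=1')
#     data = json.loads(raw)
#     # -> {"offset": 0, "count": ..., "result": [...], "status": "ok"}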
host = 'https://www.airav.wiki'
# airav has no actress photos, so use the images from javbus instead
def getActorPhoto(javbus_json):
result = javbus_json.get('actor_photo')
if isinstance(result, dict) and len(result):
return result
return ''
def getTitle(htmlcode):  # extract the title
html = etree.fromstring(htmlcode, etree.HTMLParser())
title = str(html.xpath('/html/head/title/text()')[0])
result = str(re.findall('](.*?)- AIRAV-WIKI', title)[0]).strip()
return result
def getStudio(htmlcode, javbus_json):  # extract the studio
# prefer the javbus value when it has data
result = javbus_json.get('studio')
if isinstance(result, str) and len(result):
return result
html = etree.fromstring(htmlcode, etree.HTMLParser())
return str(html.xpath('//a[contains(@href,"?video_factory=")]/text()')).strip(" ['']")
def getYear(htmlcode, javbus_json):  # extract the year
result = javbus_json.get('year')
if isinstance(result, str) and len(result):
return result
release = getRelease(htmlcode, javbus_json)
if len(release) != len('2000-01-01'):
return ''
return release[:4]
def getCover(htmlcode, javbus_json):  # extract the cover image
result = javbus_json.get('cover')
if isinstance(result, str) and len(result):
return result
html = etree.fromstring(htmlcode, etree.HTMLParser())
return html.xpath('//img[contains(@src,"/storage/big_pic/")]/@src')[0]
def getRelease(htmlcode, javbus_json):  # extract the release date
result = javbus_json.get('release')
if isinstance(result, str) and len(result):
return result
html = etree.fromstring(htmlcode, etree.HTMLParser())
try:
result = re.search(r'\d{4}-\d{2}-\d{2}', str(html.xpath('//li[contains(text(),"發片日期")]/text()'))).group()
except AttributeError:  # re.search found no date and returned None
return ''
return result
def getRuntime(javbus_json):  # extract the runtime
result = javbus_json.get('runtime')
if isinstance(result, str) and len(result):
return result
return ''
# airav's actress database mostly uses kanji names while javbus mostly uses
# Japanese kana, so airav takes priority
def getActor(htmlcode, javbus_json):  # extract the actresses
b=[]
html = etree.fromstring(htmlcode, etree.HTMLParser())
a = html.xpath('//ul[@class="videoAvstarList"]/li/a[starts-with(@href,"/idol/")]/text()')
for v in a:
v = v.strip()
if len(v):
b.append(v)
if len(b):
return b
result = javbus_json.get('actor')
if isinstance(result, list) and len(result):
return result
return []
def getNum(htmlcode, javbus_json):  # extract the ID number
result = javbus_json.get('number')
if isinstance(result, str) and len(result):
return result
html = etree.fromstring(htmlcode, etree.HTMLParser())
title = str(html.xpath('/html/head/title/text()')[0])
result = str(re.findall(r'^\[(.*?)]', title)[0])
return result
def getDirector(javbus_json):  # extract the director
result = javbus_json.get('director')
if isinstance(result, str) and len(result):
return result
return ''
def getOutline(htmlcode):  # extract the synopsis
html = etree.fromstring(htmlcode, etree.HTMLParser())
try:
result = html.xpath("string(//div[@class='d-flex videoDataBlock']/div[@class='synopsis']/p)").replace('\n','').strip()
return result
except Exception:
return ''
def getSerise(javbus_json):  # extract the series
result = javbus_json.get('series')
if isinstance(result, str) and len(result):
return result
return ''
def getTag(htmlcode):  # extract the tags
tag = []
soup = BeautifulSoup(htmlcode, 'lxml')
x = soup.find_all(attrs={'class': 'tagBtnMargin'})
a = x[0].find_all('a')
for i in a:
tag.append(i.get_text())
return tag
def getExtrafanart(htmlcode):  # extract the preview stills
html_pattern = re.compile(r'<div class="mobileImgThumbnail">[\s\S]*?</div></div></div></div>')
html = html_pattern.search(htmlcode)
if html:
html = html.group()
extrafanart_pattern = re.compile(r'<img.*?src="(.*?)"')
extrafanart_imgs = extrafanart_pattern.findall(html)
if extrafanart_imgs:
return extrafanart_imgs
return ''
def search(keyword):  # search and return the accumulated results
result = []
page = 1
while page > 0:
# search_result = {"offset": 0,"count": 4,"result": [
# {"vid": "99-07-15076","slug": "Wrop6o","name": "朝ゴミ出しする近所の遊び好きノーブラ奥さん 江波りゅう",
# "url": "","view": 98,"img_url": "https://wiki-img.airav.wiki/storage/big_pic/99-07-15076.jpg","barcode": "_1pondo_012717_472"},
# {"vid": "99-27-00286","slug": "DlPEua","name": "放課後に、仕込んでください 〜優等生は無言でスカートを捲り上げる〜",
# "url": "","view": 69,"img_url": "https://wiki-img.airav.wiki/storage/big_pic/99-27-00286.jpg","barcode": "caribbeancom012717-360"},
# {"vid": "99-07-15070","slug": "VLS3WY","name": "放課後に、仕込んでください ~優等生は無言でスカートを捲り上げる~ ももき希",
# "url": "","view": 58,"img_url": "https://wiki-img.airav.wiki/storage/big_pic/99-07-15070.jpg","barcode": "caribbeancom_012717-360"},
# {"vid": "99-27-00287","slug": "YdMVb3","name": "朝ゴミ出しする近所の遊び好きノーブラ奥さん 江波りゅう",
# "url": "","view": 56,"img_url": "https://wiki-img.airav.wiki/storage/big_pic/99-27-00287.jpg","barcode": "1pondo_012717_472"}
# ],"status": "ok"}
search_result = get_html(host + '/api/video/list?lang=zh-TW&lng=jp&search=' + keyword + '&page=' + str(page))
try:
json_data = json.loads(search_result)
except json.decoder.JSONDecodeError:
print("[-]Json decoder error!")
return []
result_offset = int(json_data["offset"])
result_count = int(json_data["count"])
result_size = len(json_data["result"])
if result_count <= 0 or result_size <= 0:
return result
elif result_count > result_offset + result_size:  # more pages remain; fetch the next one
result.extend(json_data["result"])
page += 1
elif result_count == result_offset + result_size:  # this was the last page
result.extend(json_data["result"])
page = 0
else:
page = 0
return result
def main(number):
try:
htmlcode = get_html('https://cn.airav.wiki/video/' + number)
try:
javbus_json = json.loads(javbus.main(number))
except Exception:
# javbus may have no page for this number (e.g. a 404); fall back to an
# empty dict so the airav-sourced fields below can still be filled in
print(number)
javbus_json = {}
dic = {
# title: airav works fine here
'title': getTitle(htmlcode),
# studio: try javbus first, then fall back to this site
'studio': getStudio(htmlcode, javbus_json),
# year: try javbus first, then fall back to this site
'year': getYear(htmlcode, javbus_json),
# synopsis: from airav
'outline': getOutline(htmlcode),
# from javbus
'runtime': getRuntime(javbus_json),
# director: from javbus
'director': getDirector(javbus_json),
# actresses: try airav first
'actor': getActor(htmlcode, javbus_json),
# release date: try javbus first
'release': getRelease(htmlcode, javbus_json),
# ID number: from javbus
'number': getNum(htmlcode, javbus_json),
# cover URL: from javbus
'cover': getCover(htmlcode, javbus_json),
# preview stills
'extrafanart': getExtrafanart(htmlcode),
'imagecut': 1,
# from airav
'tag': getTag(htmlcode),
# from javbus
'label': getSerise(javbus_json),
'actor_photo': getActorPhoto(javbus_json),
'website': 'https://www.airav.wiki/video/' + number,
'source': 'airav.py',
# from javbus
'series': getSerise(javbus_json)
}
js = json.dumps(dic, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ':'))
return js
except Exception as e:
if config.getInstance().debug():
print(e)
data = {
"title": "",
}
js = json.dumps(
data, ensure_ascii=False, sort_keys=True, indent=4, separators=(",", ":")
)
return js
if __name__ == '__main__':
config.getInstance().set_override("actor_photo:download_for_kodi=1")
config.getInstance().set_override("debug_mode:switch=1")
print(main('ADV-R0624')) # javbus page returns 404, but airav has data
print(main('ADN-188')) # single actress
print(main('CJOD-278')) # multiple actresses; javbus uses kana names, airav uses kanji
from .Command import *
from .Environment import Environment
from .Interpreter import Interpreter
import csv
import io
import json
import logging
import uuid
from abc import ABCMeta, abstractmethod
from collections import defaultdict, namedtuple
from contextlib import closing
from itertools import chain
from typing import Set
import psycopg2
import psycopg2.extras
from botocore.exceptions import ClientError
from csp.decorators import csp_update
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import connections, ProgrammingError
from django.db.models import (
Count,
F,
CharField,
Value,
Func,
Q,
Prefetch,
)
from django.db.models.functions import TruncDay
from django.forms.models import model_to_dict
from django.http import (
Http404,
HttpResponse,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseRedirect,
HttpResponseServerError,
JsonResponse,
)
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views.decorators.http import (
require_GET,
require_http_methods,
)
from django.views.generic import DetailView, FormView, TemplateView, UpdateView, View
from psycopg2 import sql
from waffle.mixins import WaffleFlagMixin
from dataworkspace import datasets_db
from dataworkspace.apps.api_v1.core.views import invalidate_superset_user_cached_credentials
from dataworkspace.apps.applications.models import ApplicationInstance
from dataworkspace.apps.core.boto3_client import get_s3_client
from dataworkspace.apps.core.charts.models import ChartBuilderChart
from dataworkspace.apps.core.charts.tasks import run_chart_builder_query
from dataworkspace.apps.core.errors import DatasetPermissionDenied, DatasetPreviewDisabledError
from dataworkspace.apps.core.utils import (
StreamingHttpResponseWithoutDjangoDbConnection,
database_dsn,
streaming_query_response,
table_data,
view_exists,
get_random_data_sample,
)
from dataworkspace.apps.core.models import (
Database,
)
from dataworkspace.apps.datasets.constants import (
DataSetType,
DataLinkType,
)
from dataworkspace.apps.datasets.constants import TagType
from dataworkspace.apps.datasets.forms import (
ChartSourceSelectForm,
DatasetEditForm,
DatasetSearchForm,
EligibilityCriteriaForm,
RelatedMastersSortForm,
RelatedDataCutsSortForm,
RelatedVisualisationsSortForm,
UserSearchForm,
VisualisationCatalogueItemEditForm,
)
from dataworkspace.apps.datasets.models import (
CustomDatasetQuery,
DataCutDataset,
DataSet,
DataSetUserPermission,
PendingAuthorizedUsers,
MasterDataset,
ReferenceDataset,
ReferenceDatasetField,
SourceLink,
SourceView,
VisualisationCatalogueItem,
SourceTable,
ToolQueryAuditLogTable,
Tag,
VisualisationUserPermission,
SourceTableFieldDefinition,
)
from dataworkspace.apps.datasets.permissions.utils import (
process_dataset_authorized_users_change,
process_visualisation_catalogue_item_authorized_users_change,
)
from dataworkspace.apps.datasets.search import search_for_datasets
from dataworkspace.apps.datasets.utils import (
build_filtered_dataset_query,
dataset_type_to_manage_unpublished_permission_codename,
find_dataset,
get_code_snippets_for_table,
get_code_snippets_for_query,
get_code_snippets_for_reference_table,
get_detailed_changelog,
)
from dataworkspace.apps.eventlog.models import EventLog
from dataworkspace.apps.eventlog.utils import log_event, log_permission_change
from dataworkspace.apps.explorer.utils import invalidate_data_explorer_user_cached_credentials
logger = logging.getLogger("app")
def _matches_filters(
data,
unpublished: bool,
opendata: bool,
withvisuals: bool,
use: Set,
data_type: Set,
source_ids: Set,
topic_ids: Set,
user_accessible: bool = False,
user_inaccessible: bool = False,
selected_user_datasets: Set = None,
):
subscribed_or_bookmarked = set()
if data["is_bookmarked"]:
subscribed_or_bookmarked.add("bookmarked")
if data["is_subscribed"]:
subscribed_or_bookmarked.add("subscribed")
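# Every selected facet must match for the dataset to pass; with no facets
# selected, every visible dataset passes. For example, data_type={1} keeps
# only rows whose data["data_type"] == 1 (an illustrative value).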
return (
(
not selected_user_datasets
or selected_user_datasets == [None]
or set(selected_user_datasets).intersection(subscribed_or_bookmarked)
)
and (unpublished or data["published"])
and (not opendata or data["is_open_data"])
and (not withvisuals or data["has_visuals"])
and (not data_type or data_type == [None] or data["data_type"] in data_type)
and (not source_ids or source_ids.intersection(set(data["source_tag_ids"])))
and (not topic_ids or topic_ids.intersection(set(data["topic_tag_ids"])))
and (not user_accessible or data["has_access"])
and (not user_inaccessible or not data["has_access"])
)
def has_unpublished_dataset_access(user):
access = user.is_superuser
for dataset_type in DataSetType:
access = access or user.has_perm(
dataset_type_to_manage_unpublished_permission_codename(dataset_type.value)
)
return access
def _get_tags_as_dict():
"""
Gets all tags and returns them as a dictionary keyed by the tag.id as a string
@return:
"""
return {str(tag.id): model_to_dict(tag) for tag in Tag.objects.all()}
@require_GET
def find_datasets(request):
###############
# Validate form
form = DatasetSearchForm(request.GET)
if not form.is_valid():
logger.warning(form.errors)
return HttpResponseRedirect(reverse("datasets:find_datasets"))
data_types = form.fields[
"data_type"
].choices # Cache these now, as we annotate them with result numbers later which we don't want here.
###############################################
# Find all results, and matching filter numbers
filters = form.get_filters()
all_visible_datasets, matched_datasets = search_for_datasets(
request.user, filters, _matches_filters
)
form.annotate_and_update_filters(
all_visible_datasets,
matcher=_matches_filters,
)
####################################
# Select the current page of results
paginator = Paginator(
matched_datasets,
settings.SEARCH_RESULTS_DATASETS_PER_PAGE,
)
datasets = paginator.get_page(request.GET.get("page"))
########################################################
# Augment results with tags, avoiding queries-per-result
tags_dict = _get_tags_as_dict()
for dataset in datasets:
dataset["sources"] = [
tags_dict.get(str(source_id)) for source_id in dataset["source_tag_ids"]
]
dataset["topics"] = [tags_dict.get(str(topic_id)) for topic_id in dataset["topic_tag_ids"]]
######################################################################
# Augment results with last updated dates, avoiding queries-per-result
# Data structures to quickly look up datasets as needed further down
datasets_by_type = defaultdict(list)
datasets_by_type_id = {}
for dataset in datasets:
datasets_by_type[dataset["data_type"]].append(dataset)
datasets_by_type_id[(dataset["data_type"], dataset["id"])] = dataset
# Reference datasets
reference_datasets = ReferenceDataset.objects.filter(
uuid__in=tuple(dataset["id"] for dataset in datasets_by_type[DataSetType.REFERENCE.value])
)
for reference_dataset in reference_datasets:
dataset = datasets_by_type_id[(DataSetType.REFERENCE.value, reference_dataset.uuid)]
try:
# If the reference dataset csv table doesn't exist we
# get an unhandled relation does not exist error
# this is currently only a problem with integration tests
dataset["last_updated"] = reference_dataset.data_last_updated
except ProgrammingError as e:
logger.error(e)
dataset["last_updated"] = None
# Master datasets and datacuts together to minimise metadata table queries
master_datasets = MasterDataset.objects.filter(
id__in=tuple(dataset["id"] for dataset in datasets_by_type[DataSetType.MASTER.value])
).prefetch_related("sourcetable_set")
datacut_datasets = DataCutDataset.objects.filter(
id__in=tuple(dataset["id"] for dataset in datasets_by_type[DataSetType.DATACUT.value])
).prefetch_related(
Prefetch(
"customdatasetquery_set",
queryset=CustomDatasetQuery.objects.prefetch_related("tables"),
)
)
databases = {database.id: database for database in Database.objects.all()}
tables_and_last_updated_dates = datasets_db.get_all_tables_last_updated_date(
[
(databases[table.database_id].memorable_name, table.schema, table.table)
for master_dataset in master_datasets
for table in master_dataset.sourcetable_set.all()
]
+ [
(databases[query.database_id].memorable_name, table.schema, table.table)
for datacut_dataset in datacut_datasets
for query in datacut_dataset.customdatasetquery_set.all()
for table in query.tables.all()
]
)
def _without_none(it):
return (val for val in it if val is not None)
for master_dataset in master_datasets:
dataset = datasets_by_type_id[(DataSetType.MASTER.value, master_dataset.id)]
dataset["last_updated"] = max(
_without_none(
(
tables_and_last_updated_dates[databases[table.database_id].memorable_name].get(
(table.schema, table.table)
)
for table in master_dataset.sourcetable_set.all()
)
),
default=None,
)
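# A datacut query is only as fresh as its least recently updated table (min
# over tables); the dataset as a whole is as fresh as its most recently
# updated query (max over queries).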
for datacut_dataset in datacut_datasets:
dataset = datasets_by_type_id[(DataSetType.DATACUT.value, datacut_dataset.id)]
last_updated_dates_for_queries = (
(
tables_and_last_updated_dates[databases[query.database_id].memorable_name].get(
(table.schema, table.table)
)
for table in query.tables.all()
)
for query in datacut_dataset.customdatasetquery_set.all()
)
dataset["last_updated"] = max(
_without_none(
(
min(_without_none(last_updated_dates_for_query), default=None)
for last_updated_dates_for_query in last_updated_dates_for_queries
)
),
default=None,
)
# Visualisations
visualisation_datasets = VisualisationCatalogueItem.objects.filter(
id__in=tuple(
dataset["id"] for dataset in datasets_by_type[DataSetType.VISUALISATION.value]
)
).prefetch_related("visualisationlink_set")
for visualisation_dataset in visualisation_datasets:
dataset = datasets_by_type_id[(DataSetType.VISUALISATION.value, visualisation_dataset.id)]
dataset["last_updated"] = max(
_without_none(
(
link.data_source_last_updated
for link in visualisation_dataset.visualisationlink_set.all()
)
),
default=None,
)
return render(
request,
"datasets/index.html",
{
"form": form,
"query": filters.query,
"datasets": datasets,
"data_type": dict(data_types),
"show_admin_filters": has_unpublished_dataset_access(request.user),
"DATASET_FINDER_FLAG": settings.DATASET_FINDER_ADMIN_ONLY_FLAG,
"search_type": "searchBar" if filters.query else "noSearch",
"has_filters": filters.has_filters(),
},
)
class DatasetDetailView(DetailView):
def _is_reference_dataset(self):
return isinstance(self.object, ReferenceDataset)
def _is_visualisation(self):
return isinstance(self.object, VisualisationCatalogueItem)
def get_object(self, queryset=None):
return find_dataset(self.kwargs["dataset_uuid"], self.request.user)
@csp_update(frame_src=settings.QUICKSIGHT_DASHBOARD_HOST)
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
def _get_source_text(self, model):
source_text = ",".join(
sorted({t.name for t in model.tags.filter(type=TagType.SOURCE)})
)
return source_text
def _get_user_tools_access(self) -> bool:
user_has_tools_access = self.request.user.user_permissions.filter(
codename="start_all_applications",
content_type=ContentType.objects.get_for_model(ApplicationInstance),
).exists()
return user_has_tools_access
def _get_context_data_for_master_dataset(self, ctx, **kwargs):
source_tables = sorted(self.object.sourcetable_set.all(), key=lambda x: x.name)
MasterDatasetInfo = namedtuple(
"MasterDatasetInfo", ("source_table", "code_snippets", "columns")
)
master_datasets_info = [
MasterDatasetInfo(
source_table=source_table,
code_snippets=get_code_snippets_for_table(source_table),
columns=datasets_db.get_columns(
source_table.database.memorable_name,
schema=source_table.schema,
table=source_table.table,
include_types=True,
),
)
for source_table in source_tables
]
summarised_update_frequency = ",".join(
sorted({t.get_frequency_display() for t in source_tables})
)
subscription = self.object.subscriptions.filter(user=self.request.user)
ctx.update(
{
"summarised_update_frequency": summarised_update_frequency,
"source_text": self._get_source_text(self.object),
"has_access": self.object.user_has_access(self.request.user),
"has_tools_access": self._get_user_tools_access(),
"is_bookmarked": self.object.user_has_bookmarked(self.request.user),
"master_datasets_info": master_datasets_info,
"source_table_type": DataLinkType.SOURCE_TABLE,
"related_data": self.object.related_datasets(),
"related_visualisations": self.object.related_visualisations.filter(
published=True
),
"subscription": {
"current_user_is_subscribed": subscription.exists()
and subscription.first().is_active(),
"details": subscription.first(),
},
}
)
return ctx
def _get_context_data_for_datacut_dataset(self, ctx, **kwargs):
custom_queries = self.object.customdatasetquery_set.all().prefetch_related("tables")
datacut_links = sorted(
chain(
self.object.sourcetable_set.all(),
self.object.sourcelink_set.all(),
custom_queries,
),
key=lambda x: x.name,
)
summarised_update_frequency = ",".join(
sorted({t.get_frequency_display() for t in datacut_links})
)
DatacutLinkInfo = namedtuple(
"DatacutLinkInfo",
("datacut_link", "can_show_link", "code_snippets", "columns"),
)
datacut_links_info = [
DatacutLinkInfo(
datacut_link=datacut_link,
can_show_link=datacut_link.can_show_link_for_user(self.request.user),
code_snippets=(
get_code_snippets_for_query(datacut_link.query)
if hasattr(datacut_link, "query")
else None
),
columns=(
datasets_db.get_columns(
database_name=datacut_link.database.memorable_name,
query=datacut_link.query,
include_types=True,
)
if hasattr(datacut_link, "query")
else None
),
)
for datacut_link in datacut_links
]
subscription = self.object.subscriptions.filter(user=self.request.user)
ctx.update(
{
"has_access": self.object.user_has_access(self.request.user),
"is_bookmarked": self.object.user_has_bookmarked(self.request.user),
"datacut_links_info": datacut_links_info,
"data_hosted_externally": any(
not source_link.url.startswith("s3://")
for source_link in self.object.sourcelink_set.all()
),
"custom_dataset_query_type": DataLinkType.CUSTOM_QUERY,
"related_data": self.object.related_datasets(),
"related_visualisations": self.object.related_visualisations.filter(
published=True
),
"summarised_update_frequency": summarised_update_frequency,
"source_text": self._get_source_text(self.object),
"subscription": {
"current_user_is_subscribed": subscription.exists()
and subscription.first().is_active(),
"details": subscription.first(),
},
}
)
return ctx
def _get_context_data_for_reference_dataset(self, ctx, **kwargs):
records = self.object.get_records()
total_record_count = records.count()
preview_limit = self.get_preview_limit(total_record_count)
records = records[:preview_limit]
code_snippets = get_code_snippets_for_reference_table(self.object.table_name)
columns = None
if self.object.external_database:
columns = datasets_db.get_columns(
self.object.external_database.memorable_name,
schema="public",
table=self.object.table_name,
include_types=True,
)
subscription = self.object.subscriptions.filter(user=self.request.user)
ctx.update(
{
"preview_limit": preview_limit,
"record_count": total_record_count,
"records": records,
"is_bookmarked": self.object.user_has_bookmarked(self.request.user),
"DATA_GRID_REFERENCE_DATASET_FLAG": settings.DATA_GRID_REFERENCE_DATASET_FLAG,
"code_snippets": code_snippets,
"columns": columns,
"subscription": {
"current_user_is_subscribed": subscription.exists()
and subscription.first().is_active(),
"details": subscription.first(),
},
}
)
return ctx
def _get_context_data_for_visualisation(self, ctx, **kwargs):
ctx.update(
{
"has_access": self.object.user_has_access(self.request.user),
"is_bookmarked": self.object.user_has_bookmarked(self.request.user),
"visualisation_links": self.object.get_visualisation_links(self.request),
"summarised_update_frequency": "N/A",
"source_text": self._get_source_text(self.object),
}
)
return ctx
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx["model"] = self.object
ctx["DATA_CUT_ENHANCED_PREVIEW_FLAG"] = settings.DATA_CUT_ENHANCED_PREVIEW_FLAG
ctx["DATASET_CHANGELOG_PAGE_FLAG"] = settings.DATASET_CHANGELOG_PAGE_FLAG
ctx["DATA_UPLOADER_UI_FLAG"] = settings.DATA_UPLOADER_UI_FLAG
if self._is_reference_dataset():
return self._get_context_data_for_reference_dataset(ctx, **kwargs)
elif self._is_visualisation():
return self._get_context_data_for_visualisation(ctx, **kwargs)
elif self.object.type == DataSetType.MASTER:
return self._get_context_data_for_master_dataset(ctx, **kwargs)
elif self.object.type == DataSetType.DATACUT:
return self._get_context_data_for_datacut_dataset(ctx, **kwargs)
raise ValueError(f"Unknown dataset/type for {self.__class__.__name__}: {self.object}")
def get_template_names(self):
if self._is_reference_dataset():
return ["datasets/referencedataset_detail.html"]
elif self.object.type == DataSetType.MASTER:
return ["datasets/master_dataset.html"]
elif self.object.type == DataSetType.DATACUT:
return ["datasets/data_cut_dataset.html"]
elif self._is_visualisation():
return ["datasets/visualisation_catalogue_item.html"]
raise RuntimeError(f"Unknown template for {self}")
def get_preview_limit(self, record_count):
return min([record_count, settings.REFERENCE_DATASET_PREVIEW_NUM_OF_ROWS])
@require_http_methods(["GET", "POST"])
def eligibility_criteria_view(request, dataset_uuid):
dataset = find_dataset(dataset_uuid, request.user)
if request.method == "POST":
form = EligibilityCriteriaForm(request.POST)
if form.is_valid():
access_request_id = form.cleaned_data.get("access_request")
if form.cleaned_data["meet_criteria"]:
url = reverse("request_access:dataset", args=[dataset_uuid])
if access_request_id:
url = reverse(
"request_access:dataset-request-update",
args=[access_request_id],
)
else:
url = reverse("datasets:eligibility_criteria_not_met", args=[dataset_uuid])
return HttpResponseRedirect(url)
return render(
request,
"eligibility_criteria.html",
{"dataset": dataset, "access_request": request.GET.get("access_request")},
)
@require_GET
def toggle_bookmark(request, dataset_uuid):
dataset = find_dataset(dataset_uuid, request.user)
dataset.toggle_bookmark(request.user)
return HttpResponseRedirect(dataset.get_absolute_url())
class ReferenceDatasetDownloadView(DetailView):
model = ReferenceDataset
def get_object(self, queryset=None):
return find_dataset(self.kwargs["dataset_uuid"], self.request.user, ReferenceDataset)
def get(self, request, *args, **kwargs):
dl_format = self.kwargs.get("format")
if dl_format not in ["json", "csv"]:
raise Http404
ref_dataset = self.get_object()
records = []
for record in ref_dataset.get_records():
record_data = {}
for field in ref_dataset.fields.all():
if field.data_type == ReferenceDatasetField.DATA_TYPE_FOREIGN_KEY:
relationship = getattr(record, field.relationship_name)
record_data[field.name] = (
getattr(
relationship,
field.linked_reference_dataset_field.column_name,
)
if relationship
else None
)
else:
record_data[field.name] = getattr(record, field.column_name)
records.append(record_data)
response = HttpResponse()
response["Content-Disposition"] = "attachment; filename={}-{}.{}".format(
ref_dataset.slug, ref_dataset.published_version, dl_format
)
log_event(
request.user,
EventLog.TYPE_REFERENCE_DATASET_DOWNLOAD,
ref_dataset,
extra={
"path": request.get_full_path(),
"reference_dataset_version": ref_dataset.published_version,
"download_format": dl_format,
},
)
ref_dataset.number_of_downloads = F("number_of_downloads") + 1
ref_dataset.save(update_fields=["number_of_downloads"])
if dl_format == "json":
response["Content-Type"] = "application/json"
response.write(json.dumps(list(records), cls=DjangoJSONEncoder))
else:
response["Content-Type"] = "text/csv"
with closing(io.StringIO()) as outfile:
writer = csv.DictWriter(
outfile,
fieldnames=ref_dataset.export_field_names,
quoting=csv.QUOTE_NONNUMERIC,
)
writer.writeheader()
writer.writerows(records)
response.write(outfile.getvalue()) # pylint: disable=no-member
return response
class SourceLinkDownloadView(DetailView):
model = SourceLink
def get(self, request, *args, **kwargs):
dataset = find_dataset(self.kwargs.get("dataset_uuid"), request.user)
if not dataset.user_has_access(self.request.user):
return HttpResponseForbidden()
source_link = get_object_or_404(
SourceLink, id=self.kwargs.get("source_link_id"), dataset=dataset
)
log_event(
request.user,
EventLog.TYPE_DATASET_SOURCE_LINK_DOWNLOAD,
source_link.dataset,
extra={
"path": request.get_full_path(),
**serializers.serialize("python", [source_link])[0],
},
)
dataset.number_of_downloads = F("number_of_downloads") + 1
dataset.save(update_fields=["number_of_downloads"])
if source_link.link_type == source_link.TYPE_EXTERNAL:
return HttpResponseRedirect(source_link.url)
client = get_s3_client()
try:
file_object = client.get_object(
Bucket=settings.AWS_UPLOADS_BUCKET, Key=source_link.url
)
except ClientError as ex:
try:
return HttpResponse(status=ex.response["ResponseMetadata"]["HTTPStatusCode"])
except KeyError:
return HttpResponseServerError()
response = StreamingHttpResponseWithoutDjangoDbConnection(
file_object["Body"].iter_chunks(chunk_size=65536),
content_type=file_object["ContentType"],
)
response["Content-Disposition"] = f'attachment; filename="{source_link.get_filename()}"'
response["Content-Length"] = file_object["ContentLength"]
return response
class SourceDownloadMixin:
pk_url_kwarg = "source_id"
event_log_type = None
@staticmethod
def db_object_exists(db_object):
raise NotImplementedError()
def get_table_data(self, db_object):
raise NotImplementedError()
def get(self, request, *_, **__):
dataset = find_dataset(self.kwargs.get("dataset_uuid"), request.user)
db_object = get_object_or_404(self.model, id=self.kwargs.get("source_id"), dataset=dataset)
if not db_object.dataset.user_has_access(self.request.user):
return HttpResponseForbidden()
if not self.db_object_exists(db_object):
return HttpResponseNotFound()
log_event(
request.user,
self.event_log_type,
db_object.dataset,
extra={
"path": request.get_full_path(),
**serializers.serialize("python", [db_object])[0],
},
)
dataset.number_of_downloads = F("number_of_downloads") + 1
dataset.save(update_fields=["number_of_downloads"])
return self.get_table_data(db_object)
class SourceViewDownloadView(SourceDownloadMixin, DetailView):
model = SourceView
event_log_type = EventLog.TYPE_DATASET_SOURCE_VIEW_DOWNLOAD
@staticmethod
def db_object_exists(db_object):
return view_exists(db_object.database.memorable_name, db_object.schema, db_object.view)
def get_table_data(self, db_object):
return table_data(
self.request.user.email,
db_object.database.memorable_name,
db_object.schema,
db_object.view,
db_object.get_filename(),
)
class CustomDatasetQueryDownloadView(DetailView):
model = CustomDatasetQuery
def get(self, request, *args, **kwargs):
dataset = find_dataset(self.kwargs.get("dataset_uuid"), request.user)
if not dataset.user_has_access(self.request.user):
return HttpResponseForbidden()
query = get_object_or_404(self.model, id=self.kwargs.get("query_id"), dataset=dataset)
if not query.reviewed and not request.user.is_superuser:
return HttpResponseForbidden()
log_event(
request.user,
EventLog.TYPE_DATASET_CUSTOM_QUERY_DOWNLOAD,
query.dataset,
extra={
"path": request.get_full_path(),
**serializers.serialize("python", [query])[0],
},
)
dataset.number_of_downloads = F("number_of_downloads") + 1
dataset.save(update_fields=["number_of_downloads"])
filtered_query = sql.SQL(query.query)
columns = request.GET.getlist("columns")
if columns:
trimmed_query = query.query.rstrip().rstrip(";")
filtered_query = sql.SQL("SELECT {fields} from ({query}) as data;").format(
fields=sql.SQL(",").join([sql.Identifier(column) for column in columns]),
query=sql.SQL(trimmed_query),
)
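# e.g. with columns=["id", "name"] and a stored query "SELECT * FROM foo;",
# the executed SQL becomes: SELECT "id","name" from (SELECT * FROM foo) as data;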
return streaming_query_response(
request.user.email,
query.database.memorable_name,
filtered_query,
query.get_filename(),
cursor_name=f"custom_query--{query.id}",
)
class DatasetPreviewView(DetailView, metaclass=ABCMeta):
@property
@abstractmethod
def model(self):
pass
@abstractmethod
def get_preview_data(self, dataset):
pass
def get(self, request, *args, **kwargs):
user = self.request.user
dataset = find_dataset(self.kwargs.get("dataset_uuid"), user)
if not dataset.user_has_access(user):
return HttpResponseForbidden()
source_object, columns, query = self.get_preview_data(dataset)
records = []
sample_size = settings.DATASET_PREVIEW_NUM_OF_ROWS
if columns:
rows = get_random_data_sample(
source_object.database.memorable_name,
sql.SQL(query),
sample_size,
)
for row in rows:
record_data = {}
for i, column in enumerate(columns):
record_data[column] = row[i]
records.append(record_data)
can_download = source_object.can_show_link_for_user(user)
return render(
request,
"datasets/dataset_preview.html",
{
"dataset": dataset,
"source_object": source_object,
"fields": columns,
"records": records,
"preview_limit": sample_size,
"record_count": len(records),
"fixed_table_height_limit": 10,
"truncate_limit": 100,
"can_download": can_download,
"type": source_object.type,
},
)
class SourceTablePreviewView(DatasetPreviewView):
model = SourceTable
def get_preview_data(self, dataset):
source_table_object = get_object_or_404(
self.model, id=self.kwargs.get("table_uuid"), dataset=dataset
)
database_name = source_table_object.database.memorable_name
table_name = source_table_object.table
schema_name = source_table_object.schema
columns = datasets_db.get_columns(database_name, schema=schema_name, table=table_name)
preview_query = f"""
select * from "{schema_name}"."{table_name}"
"""
return source_table_object, columns, preview_query
class CustomDatasetQueryPreviewView(DatasetPreviewView):
model = CustomDatasetQuery
def get_preview_data(self, dataset):
query_object = get_object_or_404(
self.model, id=self.kwargs.get("query_id"), dataset=dataset
)
if not query_object.reviewed and not self.request.user.is_superuser:
raise PermissionDenied()
database_name = query_object.database.memorable_name
columns = datasets_db.get_columns(database_name, query=query_object.query)
preview_query = query_object.query
return query_object, columns, preview_query
class SourceTableColumnDetails(View):
def get(self, request, dataset_uuid, table_uuid):
dataset = find_dataset(dataset_uuid, request.user, MasterDataset)
source_table = get_object_or_404(SourceTable, id=table_uuid, dataset=dataset)
columns = datasets_db.get_columns(
source_table.database.memorable_name,
schema=source_table.schema,
table=source_table.table,
include_types=True,
)
return render(
request,
"datasets/source_table_column_details.html",
context={
"dataset": dataset,
"source_table": source_table,
"columns": columns,
},
)
class ReferenceDatasetColumnDetails(View):
def get(self, request, dataset_uuid):
dataset = find_dataset(dataset_uuid, request.user, ReferenceDataset)
columns = datasets_db.get_columns(
dataset.external_database.memorable_name,
schema="public",
table=dataset.table_name,
include_types=True,
)
return render(
request,
"datasets/referencedataset_column_details.html",
context={"dataset": dataset, "columns": columns},
)
class ReferenceDatasetGridView(View):
def get(self, request, dataset_uuid):
dataset = find_dataset(dataset_uuid, request.user, ReferenceDataset)
return render(
request,
"datasets/reference_dataset_grid.html",
context={"model": dataset},
)
class RelatedDataView(View):
def get(self, request, dataset_uuid):
dataset = find_dataset(dataset_uuid, request.user)
if dataset.type == DataSetType.DATACUT:
form = RelatedMastersSortForm(request.GET)
elif dataset.type == DataSetType.MASTER:
form = RelatedDataCutsSortForm(request.GET)
else:
return HttpResponse(status=404)
if form.is_valid():
related_datasets = dataset.related_datasets(
order=form.cleaned_data.get("sort") or form.fields["sort"].initial
)
return render(
request,
"datasets/related_data.html",
context={
"dataset": dataset,
"related_data": related_datasets,
"form": form,
},
)
return HttpResponse(status=500)
class RelatedVisualisationsView(View):
def get(self, request, dataset_uuid):
dataset = find_dataset(dataset_uuid, request.user)
form = RelatedVisualisationsSortForm(request.GET)
if form.is_valid():
related_visualisations = dataset.related_visualisations.order_by(
form.cleaned_data.get("sort") or form.fields["sort"].initial
)
return render(
request,
"datasets/related_visualisations.html",
context={
"dataset": dataset,
"related_visualisations": related_visualisations,
"form": form,
},
)
return HttpResponse(status=500)
class DataCutPreviewView(WaffleFlagMixin, DetailView):
waffle_flag = settings.DATA_CUT_ENHANCED_PREVIEW_FLAG
template_name = "datasets/data_cut_preview.html"
def dispatch(self, request, *args, **kwargs):
if not self.get_object().dataset.user_has_access(self.request.user):
return HttpResponseForbidden()
return super().dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
return get_object_or_404(self.kwargs["model_class"], pk=self.kwargs["object_id"])
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
model = self.get_object()
ctx.update(
{
"can_download": model.can_show_link_for_user(self.request.user),
"form_action": model.get_absolute_url(),
"can_filter_columns": model.show_column_filter(),
"truncate_limit": 100,
"fixed_table_height_limit": 10,
}
)
if model.user_can_preview(self.request.user):
columns, records = model.get_preview_data()
ctx.update(
{
"columns": columns,
"records": records,
"preview_limit": min(
[len(records), settings.REFERENCE_DATASET_PREVIEW_NUM_OF_ROWS]
),
}
)
return ctx
class DatasetUsageHistoryView(View):
def get(self, request, dataset_uuid, **kwargs):
dataset = find_dataset(dataset_uuid, request.user, kwargs["model_class"])
if dataset.type == DataSetType.MASTER:
tables = list(dataset.sourcetable_set.values_list("table", flat=True))
return render(
request,
"datasets/dataset_usage_history.html",
context={
"dataset": dataset,
"event_description": "Queried",
"rows": ToolQueryAuditLogTable.objects.filter(table__in=tables)
.annotate(day=TruncDay("audit_log__timestamp"))
.annotate(email=F("audit_log__user__email"))
.annotate(object=F("table"))
.order_by("-day")
.values("day", "email", "object")
.annotate(count=Count("id"))[:100],
},
)
return render(
request,
"datasets/dataset_usage_history.html",
context={
"dataset": dataset,
"event_description": "Viewed"
if dataset.type == DataSetType.VISUALISATION
else "Downloaded",
"rows": dataset.events.filter(
event_type__in=[
EventLog.TYPE_DATASET_SOURCE_LINK_DOWNLOAD,
EventLog.TYPE_DATASET_CUSTOM_QUERY_DOWNLOAD,
EventLog.TYPE_VIEW_VISUALISATION_TEMPLATE,
EventLog.TYPE_VIEW_SUPERSET_VISUALISATION,
EventLog.TYPE_VIEW_QUICKSIGHT_VISUALISATION,
]
)
.annotate(day=TruncDay("timestamp"))
.annotate(email=F("user__email"))
.annotate(
object=Func(
F("extra"),
Value("fields"),
Value("name"),
function="jsonb_extract_path_text",
output_field=CharField(),
),
)
.order_by("-day")
.values("day", "email", "object")
.annotate(count=Count("id"))[:100],
},
)
class DataCutSourceDetailView(DetailView):
template_name = "datasets/data_cut_source_detail.html"
def dispatch(self, request, *args, **kwargs):
source = self.get_object()
if not source.data_grid_enabled:
raise DatasetPreviewDisabledError(source.dataset)
if not source.dataset.user_has_access(self.request.user):
raise DatasetPermissionDenied(source.dataset)
return super().dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
dataset = find_dataset(self.kwargs["dataset_uuid"], self.request.user)
return get_object_or_404(
self.kwargs["model_class"],
dataset=dataset,
pk=self.kwargs["object_id"],
)
class DataGridDataView(DetailView):
def _user_can_access(self):
source = self.get_object()
return source.dataset.user_has_access(self.request.user) and source.data_grid_enabled
def get_object(self, queryset=None):
dataset = find_dataset(self.kwargs.get("dataset_uuid"), self.request.user)
return get_object_or_404(
self.kwargs["model_class"],
dataset=dataset,
pk=self.kwargs["object_id"],
)
def dispatch(self, request, *args, **kwargs):
if not self._user_can_access():
return HttpResponseForbidden()
return super().dispatch(request, *args, **kwargs)
@staticmethod
def _get_rows(source, query, query_params):
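# A named cursor makes psycopg2 open a server-side cursor for the query;
# RealDictCursor returns each row as a {column: value} dict, which
# JsonResponse can serialise directly.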
with psycopg2.connect(
database_dsn(settings.DATABASES_DATA[source.database.memorable_name])
) as connection:
with connection.cursor(
name="data-grid-data",
cursor_factory=psycopg2.extras.RealDictCursor,
) as cursor:
cursor.execute(query, query_params)
return cursor.fetchall()
def post(self, request, *args, **kwargs):
source = self.get_object()
if request.GET.get("download"):
if not source.data_grid_download_enabled:
return JsonResponse({}, status=403)
filters = {}
for filter_data in [json.loads(x) for x in request.POST.getlist("filters")]:
filters.update(filter_data)
column_config = [
x
for x in source.get_column_config()
if x["field"] in request.POST.getlist("columns", [])
]
if not column_config:
return JsonResponse({}, status=400)
post_data = {
"filters": filters,
"limit": source.data_grid_download_limit,
"sortDir": request.POST.get("sortDir", "ASC"),
"sortField": request.POST.get("sortField", column_config[0]["field"]),
}
else:
post_data = json.loads(request.body.decode("utf-8"))
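# the grid posts a JSON body with keys such as "filters", "limit",
# "sortField" and "sortDir", consumed by build_filtered_dataset_query below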
post_data["limit"] = min(post_data.get("limit", 100), 100)
column_config = source.get_column_config()
original_query = source.get_data_grid_query()
query, params = build_filtered_dataset_query(
original_query,
column_config,
post_data,
)
if request.GET.get("download"):
extra = {
"correlation_id": str(uuid.uuid4()),
**serializers.serialize("python", [source])[0],
}
log_event(
request.user,
EventLog.TYPE_DATASET_CUSTOM_QUERY_DOWNLOAD,
source.dataset,
extra=extra,
)
def write_metrics_to_eventlog(log_data):
logger.debug("write_metrics_to_eventlog %s", log_data)
log_data.update(extra)
log_event(
request.user,
EventLog.TYPE_DATASET_CUSTOM_QUERY_DOWNLOAD_COMPLETE,
source.dataset,
extra=log_data,
)
return streaming_query_response(
request.user.email,
source.database.memorable_name,
query,
request.POST.get("export_file_name", f"custom-{source.dataset.slug}-export.csv"),
params,
original_query,
write_metrics_to_eventlog,
cursor_name=f'data-grid--{self.kwargs["model_class"].__name__}--{source.id}',
)
records = self._get_rows(source, query, params)
return JsonResponse({"records": records})
class DatasetVisualisationPreview(View):
def _get_vega_definition(self, visualisation):
vega_definition = json.loads(visualisation.vega_definition_json)
if visualisation.query:
with psycopg2.connect(
database_dsn(settings.DATABASES_DATA[visualisation.database.memorable_name])
) as connection:
with connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute(visualisation.query)
data = cursor.fetchall()
try:
# vega-lite, 'data' is a dictionary
vega_definition["data"]["values"] = data
except TypeError:
# vega, 'data' is a list, and we support setting the query
# results as the first item
vega_definition["data"][0]["values"] = data
return vega_definition
def get(self, request, dataset_uuid, object_id, **kwargs):
model_class = kwargs["model_class"]
dataset = find_dataset(dataset_uuid, request.user, model_class)
if not dataset.user_has_access(request.user):
return HttpResponseForbidden()
visualisation = dataset.visualisations.get(id=object_id)
vega_definition = self._get_vega_definition(visualisation)
return JsonResponse(vega_definition)
class DatasetVisualisationView(View):
def get(self, request, dataset_uuid, object_id, **kwargs):
model_class = kwargs["model_class"]
dataset = find_dataset(dataset_uuid, self.request.user, model_class)
if not dataset.user_has_access(request.user):
return HttpResponseForbidden()
visualisation = dataset.visualisations.live().get(id=object_id)
return render(
request,
"datasets/visualisation.html",
context={"dataset_uuid": dataset_uuid, "visualisation": visualisation},
)
class CustomQueryColumnDetails(View):
def get(self, request, dataset_uuid, query_id):
dataset = find_dataset(dataset_uuid, self.request.user, DataCutDataset)
try:
query = CustomDatasetQuery.objects.get(id=query_id, dataset__id=dataset_uuid)
except CustomDatasetQuery.DoesNotExist:
return HttpResponse(status=404)
return render(
request,
"datasets/data_cut_column_details.html",
context={
"dataset": dataset,
"query": query,
"columns": datasets_db.get_columns(
query.database.memorable_name, query=query.query, include_types=True
),
},
)
class SourceChangelogView(WaffleFlagMixin, DetailView):
waffle_flag = settings.DATASET_CHANGELOG_PAGE_FLAG
template_name = "datasets/source_changelog.html"
context_object_name = "source"
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["changelog"] = get_detailed_changelog(self.get_object())
return ctx
def get_object(self, queryset=None):
dataset = find_dataset(self.kwargs["dataset_uuid"], self.request.user)
if self.kwargs["model_class"] == ReferenceDataset:
return dataset
return get_object_or_404(
self.kwargs["model_class"],
dataset=dataset,
pk=self.kwargs["source_id"],
)
class DatasetChartView(WaffleFlagMixin, View):
waffle_flag = settings.CHART_BUILDER_PUBLISH_CHARTS_FLAG
def get_object(self):
dataset = find_dataset(
self.kwargs["dataset_uuid"], self.request.user, self.kwargs["model_class"]
)
return dataset.charts.get(id=self.kwargs["object_id"])
@csp_update(SCRIPT_SRC=["'unsafe-eval'", "blob:"], IMG_SRC=["blob:"])
def get(self, request, **kwargs):
chart = self.get_object()
if not chart.dataset.user_has_access(request.user):
return HttpResponseForbidden()
return render(
request,
"datasets/charts/chart.html",
context={
"chart": chart,
},
)
class DatasetChartDataView(DatasetChartView):
waffle_flag = settings.CHART_BUILDER_PUBLISH_CHARTS_FLAG
def get(self, request, **kwargs):
dataset_chart = self.get_object()
if not dataset_chart.dataset.user_has_access(request.user):
return HttpResponseForbidden()
chart = dataset_chart.chart
return JsonResponse(
{
"total_rows": chart.query_log.rows,
"duration": chart.query_log.duration,
"data": chart.get_table_data(chart.get_required_columns()),
}
)
class EditBaseView(View):
obj = None
summary = None  # set in dispatch() to a PendingAuthorizedUsers instance when summary_id is passed
def dispatch(self, request, *args, **kwargs):
try:
dataset = DataSet.objects.live().get(pk=self.kwargs.get("pk"))
except DataSet.DoesNotExist:
dataset = None
try:
visualisation_catalogue_item = VisualisationCatalogueItem.objects.live().get(
pk=self.kwargs.get("pk")
)
except VisualisationCatalogueItem.DoesNotExist:
visualisation_catalogue_item = None
if dataset is None and visualisation_catalogue_item is None:
raise Http404
if "summary_id" in self.kwargs:
self.summary = get_object_or_404(
PendingAuthorizedUsers.objects.all(), pk=self.kwargs.get("summary_id")
)
self.obj = dataset or visualisation_catalogue_item
if (
request.user
not in [
self.obj.information_asset_owner,
self.obj.information_asset_manager,
]
and not request.user.is_superuser
):
return HttpResponseForbidden()
return super().dispatch(request, *args, **kwargs)
class DatasetEditView(EditBaseView, UpdateView):
model = DataSet
form_class = DatasetEditForm
template_name = "datasets/edit_dataset.html"
def get_success_url(self):
return self.object.get_absolute_url()
def get_initial(self):
return {
"enquiries_contact": self.object.enquiries_contact.email
if self.object.enquiries_contact
else "",
"authorized_email_domains": ",".join(self.object.authorized_email_domains),
}
def form_valid(self, form):
if "authorized_email_domains" in form.changed_data:
log_permission_change(
self.request.user,
self.object,
EventLog.TYPE_CHANGED_AUTHORIZED_EMAIL_DOMAIN,
{"authorized_email_domains": self.object.authorized_email_domains},
f"authorized_email_domains set to {self.object.authorized_email_domains}",
)
# As the dataset's access type has changed, clear cached credentials for all
# users to ensure they either:
# - lose access if it went from REQUIRES_AUTHENTICATION/OPEN to REQUIRES_AUTHORIZATION
# - get access if it went from REQUIRES_AUTHORIZATION to REQUIRES_AUTHENTICATION/OPEN
invalidate_data_explorer_user_cached_credentials()
invalidate_superset_user_cached_credentials()
messages.success(self.request, "Dataset updated")
return super().form_valid(form)
class VisualisationCatalogueItemEditView(EditBaseView, UpdateView):
model = VisualisationCatalogueItem
form_class = VisualisationCatalogueItemEditForm
template_name = "datasets/edit_visualisation_catalogue_item.html"
def get_success_url(self):
return self.object.get_absolute_url()
def get_initial(self):
return {
"enquiries_contact": self.object.enquiries_contact.email
if self.object.enquiries_contact
else "",
"secondary_enquiries_contact": self.object.secondary_enquiries_contact.email
if self.object.secondary_enquiries_contact
else "",
"authorized_email_domains": ",".join(self.object.authorized_email_domains),
}
def form_valid(self, form):
if "authorized_email_domains" in form.changed_data:
log_permission_change(
self.request.user,
self.object,
EventLog.TYPE_CHANGED_AUTHORIZED_EMAIL_DOMAIN,
{"authorized_email_domains": self.object.authorized_email_domains},
f"authorized_email_domains set to {self.object.authorized_email_domains}",
)
# As the dataset's access type has changed, clear cached credentials for all
# users to ensure they either:
# - lose access if it went from REQUIRES_AUTHENTICATION/OPEN to REQUIRES_AUTHORIZATION
# - get access if it went from REQUIRES_AUTHORIZATION to REQUIRES_AUTHENTICATION/OPEN
invalidate_data_explorer_user_cached_credentials()
invalidate_superset_user_cached_credentials()
messages.success(self.request, "Dataset updated")
return super().form_valid(form)
class UserSearchFormView(EditBaseView, FormView):
form_class = UserSearchForm
form = None
def form_valid(self, form):
self.form = form
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
search_query = self.request.GET.get("search_query")
if search_query:
email_filter = Q(email__icontains=search_query)
name_filter = Q(first_name__icontains=search_query) | Q(
last_name__icontains=search_query
)
users = get_user_model().objects.filter(email_filter | name_filter)
context["search_results"] = users
context["search_query"] = search_query
context["obj"] = self.obj
context["obj_edit_url"] = (
reverse("datasets:edit_dataset", args=[self.obj.pk])
if isinstance(self.obj, DataSet)
else reverse("datasets:edit_visualisation_catalogue_item", args=[self.obj.pk])
)
return context
class DatasetEnquiriesContactSearchView(UserSearchFormView):
template_name = "datasets/search_enquiries_contact.html"
def get_success_url(self):
url = (
reverse(
"datasets:search_enquiries_contact",
args=[
self.obj.pk,
],
)
+ "?search_query="
+ self.form.cleaned_data["search"]
)
if self.request.GET.get("secondary_enquiries_contact"):
url = (
url
+ "&secondary_enquiries_contact="
+ self.request.GET.get("secondary_enquiries_contact")
)
return url
class DatasetSecondaryEnquiriesContactSearchView(UserSearchFormView):
template_name = "datasets/search_secondary_enquiries_contact.html"
def get_success_url(self):
url = (
reverse(
"datasets:search_secondary_enquiries_contact",
args=[
self.obj.pk,
],
)
+ "?search_query="
+ self.form.cleaned_data["search"]
)
if self.request.GET.get("enquiries_contact"):
url = url + "&enquiries_contact=" + self.request.GET.get("enquiries_contact")
return url
class DatasetEditPermissionsView(EditBaseView, View):
def dispatch(self, request, *args, **kwargs):
super().dispatch(request, *args, **kwargs)
if isinstance(self.obj, DataSet):
permissions = DataSetUserPermission.objects.filter(dataset=self.obj)
else:
permissions = VisualisationUserPermission.objects.filter(visualisation=self.obj)
users = json.dumps([p.user.id for p in permissions])
summary = PendingAuthorizedUsers.objects.create(created_by=request.user, users=users)
return HttpResponseRedirect(
reverse("datasets:edit_permissions_summary", args=[self.obj.id, summary.id])
)
class DatasetEditPermissionsSummaryView(EditBaseView, TemplateView):
template_name = "datasets/edit_permissions_summary.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["obj"] = self.obj
context["obj_edit_url"] = (
reverse("datasets:edit_dataset", args=[self.obj.pk])
if isinstance(self.obj, DataSet)
else reverse("datasets:edit_visualisation_catalogue_item", args=[self.obj.pk])
)
context["summary"] = self.summary
context["authorised_users"] = get_user_model().objects.filter(
id__in=json.loads(self.summary.users if self.summary.users else "[]")
)
return context
def post(self, request, *args, **kwargs):
authorized_users = set(
get_user_model().objects.filter(
id__in=json.loads(self.summary.users if self.summary.users else "[]")
)
)
if isinstance(self.obj, DataSet):
process_dataset_authorized_users_change(
authorized_users, request.user, self.obj, False, False, True
)
messages.success(request, "Dataset permissions updated")
return HttpResponseRedirect(reverse("datasets:edit_dataset", args=[self.obj.id]))
else:
process_visualisation_catalogue_item_authorized_users_change(
authorized_users, request.user, self.obj, False, False
)
messages.success(request, "Visualisation permissions updated")
return HttpResponseRedirect(
reverse("datasets:edit_visualisation_catalogue_item", args=[self.obj.id])
)
class DatasetAuthorisedUsersSearchView(UserSearchFormView):
template_name = "datasets/search_authorised_users.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["summary_id"] = self.kwargs.get("summary_id")
return context
def get_success_url(self):
return (
reverse(
"datasets:search_authorized_users",
args=[self.obj.pk, self.kwargs.get("summary_id")],
)
+ "?search_query="
+ self.form.cleaned_data["search"]
)
class DatasetAddAuthorisedUserView(EditBaseView, View):
def get(self, request, *args, **kwargs):
summary = PendingAuthorizedUsers.objects.get(id=self.kwargs.get("summary_id"))
user = get_user_model().objects.get(id=self.kwargs.get("user_id"))
users = json.loads(summary.users if summary.users else "[]")
if user.id not in users:
users.append(user.id)
summary.users = json.dumps(users)
summary.save()
return HttpResponseRedirect(
reverse(
"datasets:edit_permissions_summary",
args=[
self.obj.id,
self.kwargs.get("summary_id"),
],
)
)
class DatasetRemoveAuthorisedUserView(EditBaseView, View):
def get(self, request, *args, **kwargs):
summary = PendingAuthorizedUsers.objects.get(id=self.kwargs.get("summary_id"))
user = get_user_model().objects.get(id=self.kwargs.get("user_id"))
users = json.loads(summary.users if summary.users else "[]")
if user.id in users:
summary.users = json.dumps([user_id for user_id in users if user_id != user.id])
summary.save()
return HttpResponseRedirect(
reverse(
"datasets:edit_permissions_summary",
args=[
self.obj.id,
self.kwargs.get("summary_id"),
],
)
)
class SelectChartSourceView(WaffleFlagMixin, FormView):
waffle_flag = settings.CHART_BUILDER_BUILD_CHARTS_FLAG
form_class = ChartSourceSelectForm
template_name = "datasets/charts/select_chart_source.html"
def get_object(self, queryset=None):
dataset = find_dataset(self.kwargs["pk"], self.request.user, DataSet)
if not dataset.user_has_access(self.request.user):
raise DatasetPermissionDenied(dataset)
return dataset
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["dataset"] = self.get_object()
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["dataset"] = self.get_object()
return kwargs
def form_valid(self, form):
dataset = self.get_object()
source_id = form.cleaned_data["source"]
source = dataset.get_related_source(source_id)
if source is None:
raise Http404
chart = ChartBuilderChart.objects.create_from_source(source, self.request.user)
run_chart_builder_query.delay(chart.id)
if source.data_grid_enabled:
return HttpResponseRedirect(
reverse("datasets:filter_chart_data", args=(dataset.id, source.id))
)
return HttpResponseRedirect(f"{chart.get_edit_url()}?prev={self.request.path}")
class FilterChartDataView(WaffleFlagMixin, DetailView):
waffle_flag = settings.CHART_BUILDER_BUILD_CHARTS_FLAG
form_class = ChartSourceSelectForm
template_name = "datasets/charts/filter_chart_data.html"
context_object_name = "source"
def get_object(self, queryset=None):
dataset = find_dataset(self.kwargs["pk"], self.request.user, DataSet)
if not dataset.user_has_access(self.request.user):
raise DatasetPermissionDenied(dataset)
source = dataset.get_related_source(self.kwargs["source_id"])
if source is None:
raise Http404
return source
class CreateGridChartView(WaffleFlagMixin, View):
waffle_flag = settings.CHART_BUILDER_BUILD_CHARTS_FLAG
def post(self, request, dataset_uuid, source_id, *args, **kwargs):
dataset = find_dataset(dataset_uuid, self.request.user)
source = dataset.get_related_source(source_id)
if source is None:
raise Http404
filters = {}
for filter_data in [json.loads(x) for x in request.POST.getlist("filters")]:
filters.update(filter_data)
column_config = [
x
for x in source.get_column_config()
if x["field"] in request.POST.getlist("columns", [])
]
post_data = {
"filters": filters,
"sortDir": request.POST.get("sortDir", "ASC"),
"sortField": request.POST.get("sortField", column_config[0]["field"]),
}
original_query = source.get_data_grid_query()
query, params = build_filtered_dataset_query(
original_query,
column_config,
post_data,
)
db_name = list(settings.DATABASES_DATA.items())[0][0]
with connections[db_name].cursor() as cursor:
full_query = cursor.mogrify(query, params).decode()
chart = ChartBuilderChart.objects.create_from_sql(str(full_query), request.user, db_name)
run_chart_builder_query.delay(chart.id)
return HttpResponseRedirect(
f"{chart.get_edit_url()}?prev={request.META.get('HTTP_REFERER')}"
)
class DatasetChartsView(WaffleFlagMixin, View):
waffle_flag = settings.CHART_BUILDER_PUBLISH_CHARTS_FLAG
@csp_update(SCRIPT_SRC=["'unsafe-eval'", "blob:"])
def get(self, request, **kwargs):
dataset = find_dataset(self.kwargs["dataset_uuid"], self.request.user, DataSet)
if not dataset.user_has_access(self.request.user):
return HttpResponseForbidden()
return render(
self.request,
"datasets/charts/charts.html",
context={"charts": dataset.related_charts(), "dataset": dataset},
)
def find_data_dictionary_view(request, schema_name, table_name):
query = SourceTable.objects.filter(schema=schema_name, table=table_name)
if not query.exists():
raise Http404
return redirect("datasets:data_dictionary", source_uuid=query.first().id)
class DataDictionaryBaseView(View):
def get_dictionary(self, source_table):
columns = datasets_db.get_columns(
source_table.database.memorable_name,
schema=source_table.schema,
table=source_table.table,
include_types=True,
)
fields = source_table.field_definitions.all()
dictionary = []
for name, data_type in columns:
            definition = fields.filter(field=name).first() or ""
dictionary.append(
(
name,
data_type,
definition,
)
)
return columns, fields, dictionary
class DataDictionaryView(DataDictionaryBaseView):
def get(self, request, source_uuid):
source_table = get_object_or_404(SourceTable, pk=source_uuid)
dataset = None
if request.GET.get("dataset_uuid"):
dataset = find_dataset(request.GET.get("dataset_uuid"), self.request.user, DataSet)
columns, fields, dictionary = self.get_dictionary(source_table)
return render(
request,
"datasets/data_dictionary.html",
context={
"source_table": source_table,
"dataset": dataset,
"columns": columns,
"fields": fields,
"dictionary": dictionary,
},
)
class DataDictionaryEditView(DataDictionaryBaseView):
def dispatch(self, request, *args, **kwargs):
dataset = DataSet.objects.live().get(pk=self.kwargs.get("dataset_uuid"))
if (
request.user
not in [
dataset.information_asset_owner,
dataset.information_asset_manager,
]
and not request.user.is_superuser
):
return HttpResponseForbidden()
return super().dispatch(request, *args, **kwargs)
def get(self, request, dataset_uuid, source_uuid):
source_table = get_object_or_404(SourceTable, pk=source_uuid)
dataset = find_dataset(dataset_uuid, self.request.user, DataSet)
columns, fields, dictionary = self.get_dictionary(source_table)
return render(
request,
"datasets/edit_data_dictionary.html",
context={
"source_table": source_table,
"dataset": dataset,
"columns": columns,
"fields": fields,
"dictionary": dictionary,
},
)
def post(self, request, dataset_uuid, source_uuid):
source_table = get_object_or_404(SourceTable, pk=source_uuid)
dataset = find_dataset(dataset_uuid, self.request.user, DataSet)
for name, value in request.POST.items():
if name == "csrfmiddlewaretoken":
continue
field, _ = SourceTableFieldDefinition.objects.get_or_create(
source_table=source_table, field=name
)
field.description = value[:1024]
field.save()
messages.success(self.request, "Changes saved successfully")
redirect_url = (
reverse("datasets:data_dictionary", args=[source_table.id])
+ "?dataset_uuid="
+ str(dataset.id)
)
return redirect(redirect_url)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerProbesOperations(object):
"""LoadBalancerProbesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.LoadBalancerProbeListResult"]
"""Gets all the load balancer probes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.LoadBalancerProbeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerProbeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
probe_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.Probe"
"""Gets load balancer probe.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param probe_name: The name of the probe.
:type probe_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Probe, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.Probe
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Probe"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'probeName': self._serialize.url("probe_name", probe_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Probe', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'} # type: ignore
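# --- Hedged usage sketch (not part of the generated file) ---
# This operation class is attached to the network management client rather
# than instantiated directly; a minimal example of listing probes follows.
# The resource group and load balancer names are placeholders.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for probe in client.load_balancer_probes.list("my-resource-group", "my-load-balancer"):
#       print(probe.name, probe.protocol, probe.port)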
|
# -*- coding: utf-8 -*-
# Copyright © 2015 Tiger Computing Ltd
# This file is part of pytiger and distributed under the terms
# of a BSD-like license
# See the file COPYING for details
# shortcut: from pytiger.nagios import NagiosCheck
from .nagioscheck import NagiosCheck # noqa: F401
|
import re
import subprocess
def get(value, regex):
output = subprocess.getoutput(value)
r = re.search(regex, output)
if r and len(r.groups()) > 0:
return r.groups()[0]
return None
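# Usage sketch (assumed, not part of the original snippet): run a shell
# command and pull the first capture group from its output.
if __name__ == "__main__":
    # `python3 --version` prints e.g. "Python 3.11.4"; this captures "3.11.4".
    print(get("python3 --version", r"Python (\S+)"))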
|
import dash_bootstrap_components as dbc
from dash_extensions.enrich import Input, Output
from dash import html, dcc
def layout(*args, **kwargs):
return dbc.Container([
dbc.Row(html.Br()),
dbc.Row(dcc.Input(id="input"), justify="around"),
dbc.Row(html.Div(id="output"), justify="around"),
], fluid=True)
def callbacks(app):
@app.callback(Output("output", "children"), [Input("input", "value")])
def hello(value):
return f"MODULE says: Hello {value}!"
|
"""
Manage transfers from arbitrary URLs to temporary files. Socket interface for
IPC with multiple process configurations.
"""
import os, subprocess, socket, logging, threading
from galaxy import eggs
from galaxy.util import listify, json
log = logging.getLogger( __name__ )
class TransferManager( object ):
"""
Manage simple data transfers from URLs to temporary locations.
"""
def __init__( self, app ):
self.app = app
self.sa_session = app.model.context.current
self.command = 'python %s' % os.path.abspath( os.path.join( os.getcwd(), 'scripts', 'transfer.py' ) )
if app.config.get_bool( 'enable_job_recovery', True ):
# Only one Galaxy server process should be able to recover jobs! (otherwise you'll have nasty race conditions)
self.running = True
self.sleeper = Sleeper()
self.restarter = threading.Thread( target=self.__restarter )
self.restarter.start()
def new( self, path=None, **kwd ):
if 'protocol' not in kwd:
raise Exception( 'Missing required parameter "protocol".' )
protocol = kwd[ 'protocol' ]
if protocol in [ 'http', 'https' ]:
if 'url' not in kwd:
raise Exception( 'Missing required parameter "url".' )
elif protocol == 'scp':
# TODO: add more checks here?
if 'sample_dataset_id' not in kwd:
raise Exception( 'Missing required parameter "sample_dataset_id".' )
if 'file_path' not in kwd:
raise Exception( 'Missing required parameter "file_path".' )
transfer_job = self.app.model.TransferJob( state=self.app.model.TransferJob.states.NEW, params=kwd )
self.sa_session.add( transfer_job )
self.sa_session.flush()
return transfer_job
def run( self, transfer_jobs ):
"""
This method blocks, so if invoking the transfer manager ever starts
taking too long, we should move it to a thread. However, the
transfer_manager will either daemonize or return after submitting to a
running daemon, so it should be fairly quick to return.
"""
transfer_jobs = listify( transfer_jobs )
printable_tj_ids = ', '.join( [ str( tj.id ) for tj in transfer_jobs ] )
log.debug( 'Initiating transfer job(s): %s' % printable_tj_ids )
# Set all jobs running before spawning, or else updating the state may
# clobber a state change performed by the worker.
        for tj in transfer_jobs:
            tj.state = tj.states.RUNNING
self.sa_session.add_all( transfer_jobs )
self.sa_session.flush()
for tj in transfer_jobs:
params_dict = tj.params
protocol = params_dict[ 'protocol' ]
# The transfer script should daemonize fairly quickly - if this is
# not the case, this process will need to be moved to a
# non-blocking method.
cmd = '%s %s' % ( self.command, tj.id )
log.debug( 'Transfer command is: %s' % cmd )
p = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
p.wait()
output = p.stdout.read( 32768 )
if p.returncode != 0:
log.error( 'Spawning transfer job failed: %s: %s' % ( tj.id, output ) )
tj.state = tj.states.ERROR
tj.info = 'Spawning transfer job failed: %s' % output.splitlines()[-1]
self.sa_session.add( tj )
self.sa_session.flush()
def get_state( self, transfer_jobs, via_socket=False ):
transfer_jobs = listify( transfer_jobs )
rval = []
for tj in transfer_jobs:
if via_socket and tj.state not in tj.terminal_states and tj.socket:
try:
request = json.jsonrpc_request( method='get_state', id=True )
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
sock.settimeout( 5 )
sock.connect( ( 'localhost', tj.socket ) )
sock.send( json.to_json_string( request ) )
response = sock.recv( 8192 )
valid, response = json.validate_jsonrpc_response( response, id=request['id'] )
if not valid:
# No valid response received, make some pseudo-json-rpc
raise Exception( dict( code=128, message='Did not receive valid response from transfer daemon for state' ) )
if 'error' in response:
# Response was valid but Request resulted in an error
raise Exception( response['error'])
else:
# Request was valid
response['result']['transfer_job_id'] = tj.id
rval.append( response['result'] )
                except Exception as e:
# State checking via the transfer daemon failed, just
# return the state from the database instead. Callers can
# look for the 'error' member of the response to see why
# the check failed.
self.sa_session.refresh( tj )
error = e.args
                    if not isinstance( error, dict ):
error = dict( code=256, message='Error connecting to transfer daemon', data=str( e ) )
rval.append( dict( transfer_job_id=tj.id, state=tj.state, error=error ) )
else:
self.sa_session.refresh( tj )
rval.append( dict( transfer_job_id=tj.id, state=tj.state ) )
for tj_state in rval:
if tj_state['state'] in self.app.model.TransferJob.terminal_states:
log.debug( 'Transfer job %s is in terminal state: %s' % ( tj_state['transfer_job_id'], tj_state['state'] ) )
elif tj_state['state'] == self.app.model.TransferJob.states.PROGRESS and 'percent' in tj_state:
log.debug( 'Transfer job %s is %s%% complete' % ( tj_state[ 'transfer_job_id' ], tj_state[ 'percent' ] ) )
if len( rval ) == 1:
return rval[0]
return rval
def __restarter( self ):
log.info( 'Transfer job restarter starting up...' )
while self.running:
dead = []
self.sa_session.expunge_all() # our session is threadlocal so this is safe.
for tj in self.sa_session.query( self.app.model.TransferJob ) \
.filter( self.app.model.TransferJob.state == self.app.model.TransferJob.states.RUNNING ):
if not tj.pid:
continue
# This will only succeed if the process exists and is owned by the
# user running Galaxy (unless that user is root, in which case it
# can be owned by anyone - but you're not running Galaxy as root,
# right?). This is not guaranteed proof that the transfer is alive
# since another process may have assumed the original process' PID.
# But that will only cause the transfer to not restart until that
# process dies, which hopefully won't be too long from now... If
# it becomes a problem, try to talk to the socket a few times and
# restart the transfer if socket communication fails repeatedly.
try:
os.kill( tj.pid, 0 )
                except OSError:
self.sa_session.refresh( tj )
if tj.state == tj.states.RUNNING:
log.error( 'Transfer job %s is marked as running but pid %s appears to be dead.' % ( tj.id, tj.pid ) )
dead.append( tj )
if dead:
self.run( dead )
self.sleeper.sleep( 30 )
log.info( 'Transfer job restarter shutting down...' )
def shutdown( self ):
self.running = False
self.sleeper.wake()
class Sleeper( object ):
"""
Provides a 'sleep' method that sleeps for a number of seconds *unless*
the notify method is called (from a different thread).
"""
def __init__( self ):
self.condition = threading.Condition()
def sleep( self, seconds ):
self.condition.acquire()
self.condition.wait( seconds )
self.condition.release()
def wake( self ):
self.condition.acquire()
self.condition.notify()
self.condition.release()
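# Hedged usage sketch (not in the original module): a Sleeper lets a polling
# thread sleep in long intervals while staying interruptible, which is how
# __restarter above can be woken promptly by shutdown().
#
#   sleeper = Sleeper()
#   # polling thread:            sleeper.sleep( 30 )
#   # shutdown (another thread): sleeper.wake()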
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import cv2
import json
import copy
import numpy as np
from opts import opts
from detector import Detector
from tools.accum_coco import AccumCOCODetResult
import pickle
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'display']
def demo(opt):
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.debug = max(opt.debug, 1)
detector = Detector(opt)
accum_coco = AccumCOCODetResult()
if opt.demo == 'webcam' or \
opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
is_video = True
# demo on video stream
cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
else:
is_video = False
# Demo on images sequences
if os.path.isdir(opt.demo):
image_names = []
ls = os.listdir(opt.demo)
for file_name in sorted(ls):
ext = file_name[file_name.rfind('.') + 1:].lower()
if ext in image_ext:
image_names.append(os.path.join(opt.demo, file_name))
else:
image_names = [opt.demo]
# Initialize output video
out = None
out_name = opt.demo[opt.demo.rfind('/') + 1:]
if opt.save_video:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('../results/{}.mp4'.format(
opt.exp_id + '_' + out_name),fourcc, opt.save_framerate, (
opt.video_w, opt.video_h))
if opt.debug < 5:
detector.pause = False
cnt = 0
results = {}
while True:
if is_video:
_, img = cam.read()
if img is None:
save_and_exit(opt, accum_coco, out, results, out_name)
else:
if cnt < len(image_names):
img = cv2.imread(image_names[cnt])
else:
save_and_exit(opt, accum_coco, out, results, out_name)
# resize the original video for saving video results
if opt.resize_video:
img = cv2.resize(img, (opt.video_w, opt.video_h))
# skip the first X frames of the video
    if cnt < opt.skip_first:
      cnt += 1
      continue
# cv2.imshow('input', img)
# track or detect the image.
ret = detector.run(img)
# log run time
time_str = 'frame {} |'.format(cnt)
for stat in time_stats:
time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
print(time_str)
# results[cnt] is a list of dicts:
# [{'bbox': [x1, y1, x2, y2], 'tracking_id': id, 'category_id': c, ...}]
results[cnt] = ret['results']
accum_coco.add_det_to_coco(cnt, ret['results'])
# save debug image to video
if opt.save_video:
out.write(ret['generic'])
if not is_video:
cv2.imwrite('../results/demo{}.jpg'.format(cnt), ret['generic'])
# esc to quit and finish saving video
# if cv2.waitKey(1) == 27:
# save_and_exit(opt, out, results, out_name)
# return
cnt += 1
save_and_exit(opt, accum_coco, out, results)
def save_and_exit(opt, accum_coco, out=None, results=None, out_name=''):
if opt.record_mAP:
save_dict = {}
save_dict['full_res_pred'] = accum_coco.get_dt()
pickle.dump(save_dict, open(opt.save_dir + '/raw_save_dict.pkl', 'wb'))
if opt.save_results and (results is not None):
save_dir = '../results/{}_results.json'.format(opt.exp_id + '_' + out_name)
print('saving results to', save_dir)
json.dump(_to_list(copy.deepcopy(results)), open(save_dir, 'w'))
if opt.save_video and out is not None:
out.release()
sys.exit(0)
def _to_list(results):
for img_id in results:
for t in range(len(results[img_id])):
for k in results[img_id][t]:
if isinstance(results[img_id][t][k], (np.ndarray, np.float32)):
results[img_id][t][k] = results[img_id][t][k].tolist()
return results
if __name__ == '__main__':
opt = opts().init()
demo(opt)
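# Hedged invocation sketch (flag names taken from the attributes used above;
# positional arguments depend on the local `opts` implementation):
#   python demo.py --demo /path/to/video.mp4 --save_video --save_results
#   python demo.py --demo /path/to/image_dir --skip_first 10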
|
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
from fate_arch._standalone import Session
from fate_arch.abc import AddressABC, CSessionABC
from fate_arch.common.base_utils import fate_uuid
from fate_arch.common.log import getLogger
from fate_arch.computing.standalone._table import Table
LOGGER = getLogger()
class CSession(CSessionABC):
def __init__(self, session_id: str):
self._session = Session(session_id)
def get_standalone_session(self):
return self._session
def load(self, address: AddressABC, partitions: int, schema: dict, **kwargs):
from fate_arch.common.address import StandaloneAddress
from fate_arch.storage import StandaloneStorageType
if isinstance(address, StandaloneAddress):
raw_table = self._session.load(address.name, address.namespace)
if address.storage_type != StandaloneStorageType.ROLLPAIR_IN_MEMORY:
raw_table = raw_table.save_as(name=f"{address.name}_{fate_uuid()}",
namespace=address.namespace, partition=partitions,
need_cleanup=True)
table = Table(raw_table)
table.schema = schema
return table
from fate_arch.common.address import FileAddress
if isinstance(address, FileAddress):
return address
raise NotImplementedError(f"address type {type(address)} not supported with standalone backend")
def parallelize(self, data: Iterable, partition: int, include_key: bool, **kwargs):
table = self._session.parallelize(data=data, partition=partition, include_key=include_key, **kwargs)
return Table(table)
def cleanup(self, name, namespace):
return self._session.cleanup(name=name, namespace=namespace)
def stop(self):
return self._session.stop()
def kill(self):
return self._session.kill()
@property
def session_id(self):
return self._session.session_id
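# Hedged usage sketch (not part of the original module): create an in-process
# standalone session and distribute a small keyed dataset. `Table.collect` is
# assumed from the computing-table ABC.
if __name__ == "__main__":
    session = CSession(session_id=fate_uuid())
    table = session.parallelize(
        data=[(i, i * i) for i in range(10)], partition=2, include_key=True
    )
    print(sorted(table.collect()))
    session.stop()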
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.12/radon_no_pool.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'J' in data, 'variable not found in data: key=J'
assert 'county' in data, 'variable not found in data: key=county'
assert 'x' in data, 'variable not found in data: key=x'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
N = data["N"]
J = data["J"]
county = data["county"]
x = data["x"]
y = data["y"]
def init_params(data):
params = {}
return params
def model(data, params):
# initialize data
N = data["N"]
J = data["J"]
county = data["county"].long() - 1
x = data["x"]
y = data["y"]
# model block
mu_a = pyro.sample("mu_a", dist.Normal(0., 1.))
sigma_a = pyro.sample("sigma_a", dist.HalfCauchy(2.5))
with pyro.plate("J", J):
a = pyro.sample("a", dist.Normal(mu_a, sigma_a))
beta = pyro.sample("beta", dist.Normal(0., 1.))
sigma_y = pyro.sample("sigma_y", dist.HalfCauchy(2.5))
with pyro.plate("data", N):
y_hat = beta * x + a[county]
y = pyro.sample('y', dist.Normal(y_hat, sigma_y), obs=y)
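# Hedged inference sketch (not in the generated file): the Stan-derived model
# above can be fit with NUTS; the toy data below only illustrates the
# required keys and shapes.
if __name__ == "__main__":
    from pyro.infer import MCMC, NUTS

    data = {
        "N": 6,
        "J": 2,
        "county": torch.tensor([1, 1, 1, 2, 2, 2]),
        "x": torch.randn(6),
        "y": torch.randn(6),
    }
    mcmc = MCMC(NUTS(model), num_samples=100, warmup_steps=100)
    mcmc.run(data, init_params(data))
    print(mcmc.get_samples()["beta"].mean())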
|
"""
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and
auditory responses is created. Both conditions
are then accessed by their respective names to
create a sensor layout plot of the related
evoked responses.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
|
from pybabelfy.babelfy import *
text = "BabelNet is both a multilingual encyclopedic dictionary and a semantic network"
lang = "EN"
# This only works for the demo example. Change it for your RESTful key (you must register at babelfy.org for it)
key = "5e962130-b37f-4105-8512-4c97b4f3cb30"
babelapi = Babelfy()
semantic_annotations = babelapi.disambiguate(text, lang, key)
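# Hedged iteration sketch: the accessor names below follow the pybabelfy
# README and are assumptions, not verified against the installed version.
for annotation in semantic_annotations:
    start = annotation.char_fragment_start()
    end = annotation.char_fragment_end()
    print(text[start:end + 1], annotation.babel_synset_id())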
|
# Generated by Django 2.0.2 on 2018-02-20 22:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('topology', '0003_link_status_and_openvpn_parser'),
]
operations = [
migrations.AlterField(
model_name='link',
name='target',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_link_set', to='topology.Node'),
),
]
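# Applying this migration follows the standard Django flow, e.g.:
#   python manage.py migrate topology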
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("scrape_imdb.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
path("api/schema/", SpectacularAPIView.as_view(), name="api-schema"),
path(
"api/docs/",
SpectacularSwaggerView.as_view(url_name="api-schema"),
name="api-docs",
),
]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""map_file_to_code"""
import os
import argparse
class ParseIrInfo:
"""
Parse and return the operation info from ir file.
"""
def __init__(self, ir_file):
self.no_in_file_operation = []
self.ir_file_path = self.ir_path_parse(ir_file)
self.operation_info_dict = self.ir_info_parse()
def __len__(self):
return len(self.operation_info_dict)
def ir_path_parse(self, ir_file):
"""
parse the map file path.
"""
if ir_file == "":
print("[WARNING] No graph_path parameter, use current path as graph path.")
ir_file = os.path.abspath(os.path.dirname(__file__))
map_ir_file = ""
file_size = 0
map_ir_filename = "trace_code_graph"
        for filename in os.listdir(ir_file):
if map_ir_filename not in filename:
continue
tmp_file = os.path.join(ir_file, filename)
tmp_file_size = os.path.getsize(tmp_file)
if tmp_file_size >= file_size:
file_size = tmp_file_size
map_ir_file = tmp_file
if map_ir_file == "":
exit("[ERROR] Please set \"save_graphs=True\" in context to save {} file!".format(map_ir_filename))
return map_ir_file
def ir_info_parse(self):
"""
parse the ir file and save code line corresponding to the operator
"""
all_op_info_dict = {} # recode all operation info
single_op_info_dict = {} # recode single operation info
op_start_char_flag = False # Start operator fragment
op_end_char_flag = False # End of operator fragment
op_start_info_num = 0 # Accumulate the num to recode operation
operation_line = 0 # The line number of the operator
op_start_line_num = 0 # The line number of starting operator information
op_start_info_flag = False # Start operator information
        with open(self.ir_file_path, 'r') as file:
txt_context_list = file.readlines()
for line_num, txt_context in enumerate(txt_context_list):
txt_context = txt_context.strip()
# Start operator fragment
if txt_context.endswith(") {"):
op_start_char_flag = True
op_end_char_flag = False
# End of operator fragment
if txt_context == "}":
op_end_char_flag = True
# Determine whether it is operator information
if txt_context.startswith("%") and ") = " in txt_context and txt_context[1].isdigit():
op_start_info_flag = True
op_start_line_num = line_num
op_start_info_num += 1
single_op_info_dict = {"in_file": []}
# Judge and start to recode operation info
if op_start_char_flag and not op_end_char_flag and op_start_info_flag and line_num != op_start_line_num:
if "-op" in txt_context and txt_context.split("-op")[-1].split(")")[0].isdigit():
single_op_info_dict["origin_op_name"] = txt_context.split("-op")[0].split("/")[-1]
single_op_info_dict["op_name"] = txt_context.split("-op")[0].split("/")[-1].lower()
single_op_info_dict["op_num"] = "op" + txt_context.split("-op")[-1].split(")")[0]
operation_line = line_num
if "In file" in txt_context:
in_file_info = txt_context.split("#")[-1].strip().rstrip("/")
single_op_info_dict["in_file"].append(in_file_info)
if line_num - operation_line == 1 and "In file" not in txt_context and "op_num" in single_op_info_dict:
self.no_in_file_operation.append(single_op_info_dict["op_num"])
op_start_info_flag = False
all_op_info_dict[op_start_info_num] = single_op_info_dict
return all_op_info_dict
class MapOperationToLine:
"""
to show operation info
"""
def __init__(self, dump_op, ir_info_dict):
self.dump_op = dump_op
self.ir_info_dict = ir_info_dict
def show_operator_info(self):
"""
find operator
"""
origin_dump_op_name = self.dump_op.split("-")[0]
dump_op_name = origin_dump_op_name.lower()
dump_op_num = self.dump_op.split("-")[-1]
for _, op_info in self.ir_info_dict.items():
if op_info["op_num"] == dump_op_num and op_info["in_file"] is not None:
if dump_op_name in (dump_op_num, op_info["op_name"]):
if not op_info["in_file"]:
print("[WARNING] Cannot find {}'s source code in ir file.".format(op_info["origin_op_name"]))
return False
print("[INFO] Find operation '{}'.".format(op_info["origin_op_name"]))
for line in op_info["in_file"]:
print(" {}".format(line.split(" ")[0]))
print(" {}".format(line.split(" ")[-1]))
return True
print("[WARNING] Cannot find operation {}'s in ir file.".format(origin_dump_op_name))
return False
def start_find(dump_op, map_code_file):
"""
start find error operation in code.
"""
print("[INFO] Start to map the dump file to source code.")
ir_op_info_dict = ParseIrInfo(map_code_file).operation_info_dict
MapOperationToLine(dump_op, ir_op_info_dict).show_operator_info()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Find the dump operator in the user code')
parser.add_argument('--graph_path', '-p', type=str.lower, default="", help='Save graph files path (option)')
parser.add_argument('--dump_op', '-o', type=str.lower, default="", required=True,
help="Dump operator id, case insensitive, such as 'op3352'.")
args_opt = parser.parse_args()
start_find(args_opt.dump_op, args_opt.graph_path)
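# Invocation example derived from the argparse definition above (requires IR
# files saved with "save_graphs=True", as checked in ir_path_parse):
#   python map_file_to_code.py --graph_path ./graphs --dump_op op3352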
|
import asyncio
import logging
import os
import signal
from typing import (
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Tuple,
)
from async_generator import asynccontextmanager
from async_timeout import timeout
class AsyncProcessRunner():
logger = logging.getLogger("trinity.tools.async_process_runner.AsyncProcessRunner")
proc: asyncio.subprocess.Process
@classmethod
@asynccontextmanager
async def run(cls,
cmds: Tuple[str, ...],
timeout_sec: int = 10) -> AsyncIterator['AsyncProcessRunner']:
try:
async with timeout(timeout_sec):
runner = cls()
runner.proc = await asyncio.create_subprocess_exec(
*cmds,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE,
# We need this because Trinity spawns multiple processes and we need to
# take down the entire group of processes.
preexec_fn=os.setsid,
)
yield runner
runner.kill()
except asyncio.TimeoutError:
runner.kill()
@property
async def stdout(self) -> AsyncIterable[str]:
async for line in self._iterate_until_empty(self.proc.stdout.readline):
yield line
@property
async def stderr(self) -> AsyncIterable[str]:
async for line in self._iterate_until_empty(self.proc.stderr.readline):
yield line
async def _iterate_until_empty(
self,
awaitable_bytes_fn: Callable[[], Awaitable[bytes]]) -> AsyncIterable[str]:
while True:
try:
line = await awaitable_bytes_fn()
except asyncio.CancelledError:
# Return to keep the consumer of the AsyncIterable running
return
self.logger.debug(line)
if line == b'':
return
else:
yield line.decode('utf-8')
def kill(self, sig: int = signal.SIGKILL) -> None:
try:
os.killpg(os.getpgid(self.proc.pid), sig)
except ProcessLookupError:
self.logger.info("Process %s has already disappeared", self.proc.pid)
|
from easyquant import StrategyTemplate
# from easyquant import RedisIo
from easyquant import DataUtil
from threading import Thread, current_thread, Lock
import json
# import redis
import time
# import datetime
from datetime import datetime, date
import pandas as pd
# import pymongo
from QUANTAXIS.QAFetch import QATdx as tdx
# MA is used in calcStrategy.run below but was never imported in this
# fragment; assumed to come from QUANTAXIS's indicator helpers.
from QUANTAXIS.QAIndicator.base import MA
import pika
from easyquant import EasyMq
from easyquant import MongoIo
from easyquant import EasyTime
from multiprocessing import Process, Pool, cpu_count, Manager
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor,as_completed
# calc_thread_dict = Manager().dict()
data_buf_day = Manager().dict()
data_buf_5min = Manager().dict()
data_buf_5min_0 = Manager().dict()
mongo = MongoIo()
easytime=EasyTime()
executor = ThreadPoolExecutor(max_workers=cpu_count() * 50)
def do_init_data_buf(code, idx):
freq = 5
# mongo = MongoIo()
if idx == 0:
data_day = mongo.get_stock_day(code=code)
# data_min = mongo.get_stock_min_realtime(code=code, freq=freq)
data_min = mongo.get_stock_min(code=code, freq=freq)
else:
data_day = mongo.get_index_day(code=code)
# data_min = mongo.get_index_min_realtime(code=code)
data_min = mongo.get_index_min(code=code)
    ## TODO: apply price adjustment (fuquan) to the loaded bars
data_buf_day[code] = data_day
data_buf_5min[code] = data_min
# print("do-init data end, code=%s, data-buf size=%d " % (code, len(data_buf_5min)))
def do_init_data_ext(code_list, idx):
pass
# class UpdateDataThread(Thread):
# def __init__(self, code, idx):
# Thread.__init__(self)
# self.code = code
# self.idx = idx
#
# def run(self):
# do_init_data_buf(self.code, self.idx)
class calcStrategy(Thread):
def __init__(self, code, data, log, idx):
Thread.__init__(self)
self._data = data
self.code = code
self.log = log
# self.redis = redis
self.idx = idx
# self.last_time = None
# self.working = False
# def set_data(self, code, data, idx):
# Thread.__init__(self)
# self._data = data
# self.code = code
# self.log = log
# # self.redis = redis
def upd_min(self, minute):
# index_time =pd.to_datetime(easytime.get_minute_date(minute=5))
index_time = pd.to_datetime(easytime.get_minute_date_str(minute=minute, str_date=self._data['datetime']))
df_min = data_buf_5min[self.code]
# if self.code == '600822':
# print("0 code=%s, data=%d" % (self.code, len(df_min)))
if len(df_min) > 0:
            ## TODO: compute the intraday totals accumulated before this bar
# old_vol = 0
# old_amount = 0
beg_time = pd.to_datetime(easytime.get_minute_date_str(minute=minute, str_date=self._data['datetime']))
sum_df=df_min.loc[df_min.index > beg_time]
old_vol = sum_df['vol'].sum()
old_amount = sum_df['amount'].sum()
# if self.code == '600822':
# print("1 code=%s, data=%d" % (self.code, len(df_min)))
now_price = self._data['now']
if index_time in df_min.index:
# if self.code == '600822':
# print("10 code=%s, data=%s" % (self.code, index_time))
# df_min.loc[index_time, 'open'] = now_price
if now_price > df_min.loc[index_time, 'high']:
df_min.loc[index_time, 'high'] = now_price
if now_price < df_min.loc[index_time, 'low']:
df_min.loc[index_time, 'low'] = now_price
df_min.loc[index_time, 'close'] = now_price
df_min.loc[index_time, 'vol'] = self._data['volume'] - old_vol
df_min.loc[index_time, 'amount'] = self._data['amount'] - old_amount
else:
# if self.code == '600822':
# print("2 code=%s, data=%d" % (self.code, len(df_min)))
df_min.loc[index_time] = [0 for x in range(len(df_min.columns))]
df_min.loc[index_time, 'open'] = now_price
df_min.loc[index_time, 'high'] = now_price
df_min.loc[index_time, 'low'] = now_price
df_min.loc[index_time, 'close'] = now_price
df_min.loc[index_time, 'vol'] = self._data['volume'] - old_vol
df_min.loc[index_time, 'amount'] = self._data['amount'] - old_amount
                ## volume calculation
else: ##first day ???
pass
data_buf_5min[self.code] = df_min
def run(self):
# if self.working:
# return
# self.working = True
now_price = self._data['now']
now_vol = self._data['volume']
last_time = pd.to_datetime(self._data['datetime'][0:10])
# print("code=%s, data=%s" % (self.code, self._data['datetime']))
df_day = data_buf_day[self.code]
df_day.loc[last_time]=[0 for x in range(len(df_day.columns))]
df_day.loc[last_time,'open'] = self._data['open']
df_day.loc[last_time,'high']= self._data['high']
df_day.loc[last_time,'low'] = self._data['low']
df_day.loc[last_time,'close'] = now_price
df_day.loc[last_time,'vol'] = self._data['volume']
df_day.loc[last_time,'amount'] = self._data['amount']
# df=pd.concat([MA(df_day.close, x) for x in (5,10,20,30,60,90,120,250,500,750,1000,1500,2000,2500,) ], axis = 1)[-1:]
# df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm500', u'm750', u'm1000', u'm1500', u'm2000', u'm2500']
df=pd.concat([MA(df_day.close, x) for x in (5,10,20,30,60,90,120,250,13, 34, 55,) ], axis = 1)
df.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm13', u'm34', u'm55']
df_v=pd.concat([MA(df_day.vol, x) for x in (5,10,20,30,60,90,120,250,13, 34, 55,) ], axis = 1)
df_v.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm13', u'm34', u'm55']
df_a=pd.concat([MA(df_day.amount, x) for x in (5,10,20,30,60,90,120,250,13, 34, 55,) ], axis = 1)
df_a.columns = [u'm5',u'm10',u'm20',u'm30',u'm60',u'm90',u'm120', u'm250', u'm13', u'm34', u'm55']
# self.log.info("data=%s, m5=%6.2f" % (self.code, df.m5.iloc[-1]))
self.upd_min(5)
# self.log.info()
# if now_vol > df_v.m5.iloc[-1]:
# if self._data['close'] > 0:
chag_pct = (self._data['now'] - self._data['close']) / self._data['close'] * 100
# else:
# chag_pct = 0.0
# self.log.info("code=%s now=%6.2f pct=%6.2f m5=%6.2f, now_vol=%10f, m5v=%10f" % (self.code, now_price, self._data['chg_pct'], df.m5.iloc[-1], now_vol, df_v.m5.iloc[-1]))
self.log.info("code=%s now=%6.2f pct=%6.2f m5=%6.2f, high=%6.2f, low=%6.2f" % (self.code, now_price, chag_pct, df.m5.iloc[-1], self._data['high'], self._data['low']))
# self.working = False
class Strategy(StrategyTemplate):
name = 'calc-min-data' ### day
idx = 0
# EventType = 'data-sina'
config_name = './config/stock2_list.json'
def __init__(self, user, log_handler, main_engine):
StrategyTemplate.__init__(self, user, log_handler, main_engine)
self.log.info('init event:%s'% self.name)
# self.redis = RedisIo()
# self.data_util = DataUtil()
# self.code_list = []
self.idx=0
self.calc_thread_dict = {}
# init data
start_time = time.time()
task_list = []
code_list = []
with open(self.config_name, 'r') as f:
data = json.load(f)
for d in data['code']:
if len(d) > 6:
d = d[len(d)-6:len(d)]
code_list.append(d)
# self.code_list.append(d)
# pool.apply_async(do_init_data_buf, args=(d, self.idx))
# do_init_data_buf(d, self.idx)
# poolThread.append(UpdateDataThread(d, self.idx))
task_list.append(executor.submit(do_init_data_buf, d, self.idx))
# self.calc_thread_dict[d] = calcStrategy(data['code'], self.log)
# pool.close()
# pool.join()
# pool.terminate()
# for c in poolThread:
# c.start()
#
# for c in poolThread:
# c.join()
for task in as_completed(task_list):
# result = task.result()
pass
self.log.info('init event end:%s, user-time=%d' % (self.name, time.time() - start_time))
## init message queue
self.started = False
self.easymq = EasyMq()
self.easymq.init_receive(exchange="stockcn")
self.easymq.callback = self.callback
with open(self.config_name, 'r') as f:
data = json.load(f)
for d in data['code']:
if len(d) > 6:
d = d[len(d)-6:len(d)]
self.easymq.add_sub_key(routing_key=d)
# self.code_list.append(d)
#
# pool.apply_async(do_init_data_buf, args=(d, self.idx, self.data_type))
# self.easymq.callback = mycallback
# self.easymq.start()
def strategy(self, event):
if self.started:
return
self.log.info('Strategy =%s, easymq started' % self.name)
self.started = True
self.easymq.start()
def callback(self, a, b, c, data):
# self.log.info('Strategy =%s, start calc...' % self.name)
# data = json.loads(data)
# t = calcStrategy(data['code'], data, self.log, self.idx)
# t.start()
pass
|
import sys
import numpy as np
import msgpack
import re
def reconstitute(filename, fieldnum):
chkpt = msgpack.load(open(filename, 'rb'))
mesh = chkpt['mesh']
primitive = np.zeros([mesh['ni'], mesh['nj'], 4])
for patch in chkpt['primitive_patches']:
i0 = patch['rect'][0]['start']
j0 = patch['rect'][1]['start']
i1 = patch['rect'][0]['end']
j1 = patch['rect'][1]['end']
local_prim = np.array(np.frombuffer(patch['data'])).reshape([i1 - i0, j1 - j0, 4])
primitive[i0:i1, j0:j1] = local_prim
rho, vx, vy, pres = primitive[:,:,0], primitive[:,:,1], primitive[:,:,2], primitive[:,:,3]
if fieldnum==-1:
return rho, vx, vy, pres
if fieldnum==0:
return rho
if fieldnum==1:
return vx
if fieldnum==2:
return vy
if fieldnum==3:
return pres
    par = re.search('gamma_law_index=(.+?):', chkpt['parameters'])
    if par is None:
        gamma = 1.666666666666666
    else:
        gamma = float(par.group(1))
if fieldnum==4:
return pres / rho / (gamma - 1.)
if fieldnum==5:
cs = np.sqrt(gamma*pres/rho)
v = np.sqrt(vx**2 + vy**2)
return v/cs
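# Assumption (these names are used below but never defined in this fragment):
# `fn` is the run-directory prefix holding the checkpoints and `Nchkpts` lists
# the three checkpoint indices (n-1, n, n+1) used by the finite differences.
fn = sys.argv[1] if len(sys.argv) > 1 else './'
Nchkpts = [int(a) for a in sys.argv[2:5]] if len(sys.argv) > 4 else [0, 1, 2]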
nstr = str(Nchkpts[0]).zfill(4)
d = msgpack.load(open(fn+'chkpt.'+nstr+'.sf', 'rb'))
DR = float(re.search('domain_radius=(.+?):', d['parameters']).group(1))
N = d['mesh']['ni']
dx = d['mesh']['dx']
dy = dx*1
cfl = d['command_line']['cfl_number']
gamma = float(re.search('gamma_law_index=(.+?):', d['parameters']).group(1))
coolcoef = float(re.search('cooling_coefficient=(.+?):', d['parameters']).group(1))
buffrate = 10.0
buffwidth = 0.1
sinkrad = float(re.search('sink_radius=(.+?):', d['parameters']).group(1))
sinkrate = float(re.search('sink_rate=(.+?):', d['parameters']).group(1))
m1,m2 = d['masses'][0]['mass'], d['masses'][1]['mass']
alpha = float(re.search('alpha=(.+?):', d['parameters']).group(1))
x = np.arange((N))*dx - 2*DR/2. + dx/2.
xx,yy = np.zeros((N,N)),np.zeros((N,N))
for i in range(N):
xx[:,i] = x*1
yy[i,:] = x*1
rr = np.sqrt(xx**2+yy**2)
buffer_mask = np.ones((N,N)) # for summing errors only where buffer is inactive
buffer_mask[rr > DR-buffwidth] = 0.0
def dery(v,dy):
dv = v*0
dv[:,1:-1] = v[:,2:] - v[:,:-2]
dv[:,0] = 0.0
dv[:, -1] = 0.0
return dv / (2.*dy)
def derx(v,dx):
dv = v*0
dv[1:-1,:] = v[2:,:] - v[:-2,:]
dv[0, :] = 0.0
dv[-1, :] = 0.0
return dv / (2.*dx)
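# Quick sanity check (illustrative, not in the original script): derx of a
# linear ramp recovers the unit slope away from the zeroed boundary rows.
#   _v = np.outer(np.arange(8.0), np.ones(8))
#   assert np.allclose(derx(_v, 1.0)[1:-1, :], 1.0)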
def dert(v1,v2,t1,t2):
return (v2-v1)/(t2-t1)
def momentum(rho,v):
return rho*v
def energy(rho,vx,vy,eps):
return rho*eps + 0.5*rho*(vx**2+vy**2)
def compute_dert( rho_nm1, rho_np1,\
vx_nm1, vx_np1,\
vy_nm1, vy_np1,\
eps_nm1, eps_np1,\
t_nm1, t_np1):
dt_rho_n = dert( rho_nm1 , rho_np1 , t_nm1, t_np1 )
dt_momx_n = dert( momentum(rho_nm1, vx_nm1), momentum(rho_np1, vx_np1), t_nm1, t_np1 )
dt_momy_n = dert( momentum(rho_nm1, vy_nm1), momentum(rho_np1, vy_np1), t_nm1, t_np1 )
dt_en_n = dert( energy(rho_nm1, vx_nm1, vy_nm1, eps_nm1), \
energy(rho_np1, vx_np1, vy_np1, eps_np1), t_nm1, t_np1 )
return dt_rho_n, dt_momx_n, dt_momy_n, dt_en_n
def inviscid_fluxes_for_derx( rho, vx, vy, pres, eps ):
rhoflux = rho*vx
momxflux = rho*vx*vx + pres
momyflux = rho*vy*vx
enflux = (rho*eps + 0.5*rho*(vx**2 + vy**2) + pres)*vx
return rhoflux, momxflux, momyflux, enflux
def inviscid_fluxes_for_dery( rho, vx, vy, pres, eps ):
rhoflux = rho*vy
momxflux = rho*vx*vy
momyflux = rho*vy*vy + pres
enflux = (rho*eps + 0.5*rho*(vx**2 + vy**2) + pres)*vy
return rhoflux, momxflux, momyflux, enflux
def viscid_fluxes_for_derx( rho, vx, vy, pres, eps, nu ):
lam = 0
dxvx = derx(vx,dx)
dyvy = dery(vy,dy)
dxvy = derx(vy,dx)
dyvx = dery(vx,dy)
div = dxvx + dyvy
tau_xx = rho * nu * ( 2*dxvx - (2./3)*div ) + rho * lam * div
tau_yy = rho * nu * ( 2*dyvy - (2./3)*div ) + rho * lam * div
tau_xy = rho * nu * ( dyvx + dxvy )
tau_yx = tau_xy*1
momxflux = tau_xx
momyflux = tau_xy
enflux = vx*tau_xx + vy*tau_xy
return momxflux, momyflux, enflux
def viscid_fluxes_for_dery( rho, vx, vy, pres, eps, nu ):
lam = 0
dxvx = derx(vx,dx)
dyvy = dery(vy,dy)
dxvy = derx(vy,dx)
dyvx = dery(vx,dy)
div = dxvx + dyvy
tau_xx = rho * nu * ( 2*dxvx - (2./3)*div ) + rho * lam * div
tau_yy = rho * nu * ( 2*dyvy - (2./3)*div ) + rho * lam * div
tau_xy = rho * nu * ( dyvx + dxvy )
tau_yx = tau_xy*1
momxflux = tau_yx
momyflux = tau_yy
enflux = vx*tau_yx + vy*tau_yy
return momxflux, momyflux, enflux
def kinematic_viscosity( rho, pres, x, y, x1, y1, x2, y2, m1, m2):
cs2 = gamma*pres/rho
r1 = np.sqrt((x-x1)**2 + (y-y1)**2)
r2 = np.sqrt((x-x2)**2 + (y-y2)**2)
twof = 1./np.sqrt(m1/r1**3 + m2/r2**3)
return alpha*cs2*twof/np.sqrt(gamma)
def compute_all_fluxes_der( rho, vx, vy, pres, eps, x, y, x1, y1, x2, y2, m1, m2 ):
nu = kinematic_viscosity(rho,pres,x,y,x1,y1,x2,y2,m1,m2)
rhoflux_invisc_x, momxflux_invisc_x, momyflux_invisc_x, enflux_invisc_x = inviscid_fluxes_for_derx( rho, vx, vy, pres, eps )
rhoflux_invisc_y, momxflux_invisc_y, momyflux_invisc_y, enflux_invisc_y = inviscid_fluxes_for_dery( rho, vx, vy, pres, eps )
momxflux_visc_x, momyflux_visc_x, enflux_visc_x = viscid_fluxes_for_derx( rho, vx, vy, pres, eps, nu )
momxflux_visc_y, momyflux_visc_y, enflux_visc_y = viscid_fluxes_for_dery( rho, vx, vy, pres, eps, nu )
d_rhoflux = derx( rhoflux_invisc_x,dx) + dery( rhoflux_invisc_y,dy)
d_momxflux = derx(momxflux_invisc_x,dx) + dery(momxflux_invisc_y,dy) - derx(momxflux_visc_x,dx) - dery(momxflux_visc_y,dy)
d_momyflux = derx(momyflux_invisc_x,dx) + dery(momyflux_invisc_y,dy) - derx(momyflux_visc_x,dx) - dery(momyflux_visc_y,dy)
d_enflux = derx( enflux_invisc_x,dx) + dery( enflux_invisc_y,dy) - derx( enflux_visc_x,dx) - dery( enflux_visc_y,dy)
return d_rhoflux, d_momxflux, d_momyflux, d_enflux
def disk_height(rho, pres, x, y, x1, y1, x2, y2):
r1 = np.sqrt((x-x1)**2 + (y-y1)**2 + 1e-12)
r2 = np.sqrt((x-x2)**2 + (y-y2)**2 + 1e-12)
omeg = np.sqrt(m1/r1**3 + m2/r2**3)
return np.sqrt(pres/rho)/omeg
def sources_gravity( rho, pres, vx, vy, x, y, x1, y1, x2, y2 ):
r1 = np.sqrt((x-x1)**2 + (y-y1)**2)
r2 = np.sqrt((x-x2)**2 + (y-y2)**2)
h = disk_height(rho, pres, x, y, x1, y1, x2, y2)
rs1 = 0.5 * h
rs2 = 0.5 * h
for i in range(np.shape(r1)[0]):
for j in range(np.shape(r1)[1]):
if r1[i,j]<sinkrad:
transition = (1.0 - (r1[i,j]/sinkrad)**2)**2
rs1[i,j] = transition*sinkrad + (1-transition)*0.5*h[i,j]
if r2[i,j]<sinkrad:
transition = (1.0 - (r2[i,j]/sinkrad)**2)**2
rs2[i,j] = transition*sinkrad + (1-transition)*0.5*h[i,j]
fpre1= -rho * m1/(r1**2 + rs1**2)**(3./2)
fpre2= -rho * m2/(r2**2 + rs2**2)**(3./2)
f1x = fpre1 * (x-x1)
f1y = fpre1 * (y-y1)
f2x = fpre2 * (x-x2)
f2y = fpre2 * (y-y2)
momxsrc = f1x + f2x
momysrc = f1y + f2y
ensrc = vx*f1x + vy*f1y + vx*f2x + vy*f2y
return momxsrc, momysrc, ensrc
def sources_buffer( rho, vx, vy, pres, eps, rho0, vx0, vy0, pres0, eps0, x, y ): #inactive in testing mode
r = np.sqrt(x**2 + y**2)
massbuff = (rho - 1.0)
momxbuff = (rho*vx)
momybuff = (rho*vy)
v2 = vx**2 + vy**2
v20 = vx0**2 + vy0**2
en = rho *eps + 0.5*rho *v2
en0 = rho0*eps0 + 0.5*rho0*v20
enbuff = (en - 1.0/(5./3-1))
rbuff = r-(DR-buffwidth)
    omeg_out = 0.0 #zero rate disables the buffer (testing mode)
buffwind = buffrate * omeg_out * (r - rbuff) / (DR - rbuff)
massbuff*= -buffwind
momxbuff*= -buffwind
momybuff*= -buffwind
enbuff *= -buffwind
return massbuff, momxbuff, momybuff, enbuff
def sources_cooling( rho, eps ):
return -coolcoef * eps**4 / rho
def sources_sinks( rho, vx, vy, pres, eps, x, y, xbh, ybh, vxbh, vybh ): #torque-free
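    # Torque-free sink (reading of the code below, added for clarity): only
    # the radial component of the gas velocity relative to the hole is kept in
    # (vxstar, vystar), so removing mass at that velocity extracts no angular
    # momentum about the sink.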
rbh2 = (x-xbh)**2 + (y-ybh)**2
rbh = np.sqrt(rbh2)
xhat = (x-xbh)/(rbh+1e-12)
yhat = (y-ybh)/(rbh+1e-12)
vdotr = (vx-vxbh)*xhat + (vy-vybh)*yhat
vxstar = vdotr*xhat + vxbh
vystar = vdotr*yhat + vybh
momx = momentum(rho,vxstar)
momy = momentum(rho,vystar)
en = energy(rho,vxstar,vystar,eps)
s2 = sinkrad**2
sinkwind = np.exp(-(rbh2/s2)**2) * sinkrate
for i in range(N):
for j in range(N):
if rbh[i,j] >= sinkrad * 4.0:
sinkwind[i,j] = 0.0
    mdot = -sinkwind * rho
    return mdot, -sinkwind * momx, -sinkwind * momy, -sinkwind * en
n = Nchkpts[0]
nstr = str(n).zfill(4)
rho_nm1 = reconstitute(fn+'chkpt.'+nstr+'.sf',0)
vx_nm1 = reconstitute(fn+'chkpt.'+nstr+'.sf',1)
vy_nm1 = reconstitute(fn+'chkpt.'+nstr+'.sf',2)
pres_nm1= reconstitute(fn+'chkpt.'+nstr+'.sf',3)
eps_nm1 = reconstitute(fn+'chkpt.'+nstr+'.sf',4)
t_nm1 = msgpack.load(open(fn+'chkpt.'+nstr+'.sf', 'rb'))['time']
rho0 = rho_nm1*1 #Keep for buffer source terms
vx0 = vx_nm1*1
vy0 = vy_nm1*1
pres0 = pres_nm1*1
eps0 = eps_nm1*1
n = Nchkpts[1]
nstr = str(n).zfill(4)
d_n = msgpack.load(open(fn+'chkpt.'+nstr+'.sf', 'rb'))
rho_n = reconstitute(fn+'chkpt.'+nstr+'.sf',0)
vx_n = reconstitute(fn+'chkpt.'+nstr+'.sf',1)
vy_n = reconstitute(fn+'chkpt.'+nstr+'.sf',2)
pres_n = reconstitute(fn+'chkpt.'+nstr+'.sf',3)
eps_n = reconstitute(fn+'chkpt.'+nstr+'.sf',4)
t_n = d_n['time']
x1_n,y1_n = d_n['masses'][0]['x' ], d_n['masses'][0]['y' ]
x2_n,y2_n = d_n['masses'][1]['x' ], d_n['masses'][1]['y' ]
vx1_n,vy1_n = d_n['masses'][0]['vx'], d_n['masses'][0]['vy']
vx2_n,vy2_n = d_n['masses'][1]['vx'], d_n['masses'][1]['vy']
t = []
t.append(t_n)
n = Nchkpts[2]
nstr = str(n).zfill(4)
d_np1 = msgpack.load(open(fn+'chkpt.'+nstr+'.sf', 'rb'))
rho_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',0)
vx_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',1)
vy_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',2)
pres_np1= reconstitute(fn+'chkpt.'+nstr+'.sf',3)
eps_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',4)
t_np1 = d_np1['time']
x1_np1,y1_np1 = d_np1['masses'][0]['x' ], d_np1['masses'][0]['y' ]
x2_np1,y2_np1 = d_np1['masses'][1]['x' ], d_np1['masses'][1]['y' ]
vx1_np1,vy1_np1 = d_np1['masses'][0]['vx'], d_np1['masses'][0]['vy']
vx2_np1,vy2_np1 = d_np1['masses'][1]['vx'], d_np1['masses'][1]['vy']
dt_rho_n, dt_momx_n, dt_momy_n, dt_en_n = compute_dert( rho_nm1, rho_np1,\
vx_nm1, vx_np1,\
vy_nm1, vy_np1,\
eps_nm1, eps_np1,\
t_nm1, t_np1)
d_rhoflux_n, d_momxflux_n, d_momyflux_n, d_enflux_n = compute_all_fluxes_der( rho_n, vx_n, vy_n, pres_n, eps_n, xx, yy, x1_n, y1_n, x2_n, y2_n, m1, m2 )
momxsrc_grav, momysrc_grav, ensrc_grav = sources_gravity( rho_n, pres_n, vx_n, vy_n, xx, yy, x1_n, y1_n, x2_n, y2_n )
rhosrc_buff, momxsrc_buff, momysrc_buff, ensrc_buff = sources_buffer( rho_n, vx_n, vy_n, pres_n, eps_n, rho0, vx0, vy0, pres0, eps0, xx, yy )
ensrc_cool = sources_cooling( rho_n, eps_n )
rhosrc_sink1, momxsrc_sink1, momysrc_sink1, ensrc_sink1 = sources_sinks( rho_n, vx_n, vy_n, pres_n, eps_n, xx, yy, x1_n, y1_n, vx1_n, vy1_n )
rhosrc_sink2, momxsrc_sink2, momysrc_sink2, ensrc_sink2 = sources_sinks( rho_n, vx_n, vy_n, pres_n, eps_n, xx, yy, x2_n, y2_n, vx2_n, vy2_n )
rhores ,momxres ,momyres ,enres = [],[],[],[]
rhores_L2,momxres_L2,momyres_L2,enres_L2 = [],[],[],[]
rhores. append( dt_rho_n + d_rhoflux_n - rhosrc_buff - rhosrc_sink1 - rhosrc_sink2 )
momxres.append( dt_momx_n + d_momxflux_n - momxsrc_buff - momxsrc_grav - momxsrc_sink1 - momxsrc_sink2 )
momyres.append( dt_momy_n + d_momyflux_n - momysrc_buff - momysrc_grav - momysrc_sink1 - momysrc_sink2 )
enres. append( dt_en_n + d_enflux_n - ensrc_buff - ensrc_grav - ensrc_cool - ensrc_sink1 - ensrc_sink2 )
l,r=0,N
rhores_L2. append( np.sqrt(np.average(( rhores[-1][l:r,l:r]*buffer_mask)**2)) )
momxres_L2.append( np.sqrt(np.average((momxres[-1][l:r,l:r]*buffer_mask)**2)) )
momyres_L2.append( np.sqrt(np.average((momyres[-1][l:r,l:r]*buffer_mask)**2)) )
enres_L2. append( np.sqrt(np.average(( enres[-1][l:r,l:r]*buffer_mask)**2)) )
for i in range(3,len(Nchkpts)-1):
print("Analyzing checkpoint",i,"of",len(Nchkpts)-2)
rho_nm1 = rho_n*1
vx_nm1 = vx_n*1
vy_nm1 = vy_n*1
pres_nm1= pres_n*1
eps_nm1 = eps_n*1
t_nm1 = t_n*1
rho_n = rho_np1*1
vx_n = vx_np1*1
vy_n = vy_np1*1
pres_n = pres_np1*1
eps_n = eps_np1*1
t_n = t_np1*1
t.append(t_n)
x1_n,y1_n = x1_np1*1,y1_np1*1
x2_n,y2_n = x2_np1*1,y2_np1*1
vx1_n,vy1_n = vx1_np1*1,vy1_np1*1
vx2_n,vy2_n = vx2_np1*1,vy2_np1*1
n = Nchkpts[i]
    nstr = str(n).zfill(4)
d_np1 = msgpack.load(open(fn+'chkpt.'+nstr+'.sf','rb'))
rho_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',0)
vx_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',1)
vy_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',2)
pres_np1= reconstitute(fn+'chkpt.'+nstr+'.sf',3)
eps_np1 = reconstitute(fn+'chkpt.'+nstr+'.sf',4)
t_np1 = d_np1['time']
x1_np1,y1_np1 = d_np1['masses'][0]['x' ], d_np1['masses'][0]['y' ]
x2_np1,y2_np1 = d_np1['masses'][1]['x' ], d_np1['masses'][1]['y' ]
vx1_np1,vy1_np1 = d_np1['masses'][0]['vx'], d_np1['masses'][0]['vy']
vx2_np1,vy2_np1 = d_np1['masses'][1]['vx'], d_np1['masses'][1]['vy']
dt_rho_n, dt_momx_n, dt_momy_n, dt_en_n = compute_dert( rho_nm1, rho_np1,\
vx_nm1, vx_np1,\
vy_nm1, vy_np1,\
eps_nm1, eps_np1,\
t_nm1, t_np1)
d_rhoflux_n, d_momxflux_n, d_momyflux_n, d_enflux_n = compute_all_fluxes_der( rho_n, vx_n, vy_n, pres_n, eps_n, xx, yy, x1_n, y1_n, x2_n, y2_n, m1, m2 )
momxsrc_grav, momysrc_grav, ensrc_grav = sources_gravity( rho_n, pres_n, vx_n, vy_n, xx, yy, x1_n, y1_n, x2_n, y2_n )
rhosrc_buff, momxsrc_buff, momysrc_buff, ensrc_buff = sources_buffer( rho_n, vx_n, vy_n, pres_n, eps_n, rho0, vx0, vy0, pres0, eps0, xx, yy )
ensrc_cool = sources_cooling( rho_n, eps_n )
rhosrc_sink1, momxsrc_sink1, momysrc_sink1, ensrc_sink1 = sources_sinks( rho_n, vx_n, vy_n, pres_n, eps_n, xx, yy, x1_n, y1_n, vx1_n, vy1_n )
rhosrc_sink2, momxsrc_sink2, momysrc_sink2, ensrc_sink2 = sources_sinks( rho_n, vx_n, vy_n, pres_n, eps_n, xx, yy, x2_n, y2_n, vx2_n, vy2_n )
rhores, momxres, momyres, enres = [],[],[],[] #kill old lists, otherwise memory usage grows
rhores. append( dt_rho_n + d_rhoflux_n - rhosrc_buff - rhosrc_sink1 - rhosrc_sink2 )
momxres.append( dt_momx_n + d_momxflux_n - momxsrc_buff - momxsrc_grav - momxsrc_sink1 - momxsrc_sink2 )
momyres.append( dt_momy_n + d_momyflux_n - momysrc_buff - momysrc_grav - momysrc_sink1 - momysrc_sink2 )
enres. append( dt_en_n + d_enflux_n - ensrc_buff - ensrc_grav - ensrc_cool - ensrc_sink1 - ensrc_sink2 )
rhores_L2. append( np.sqrt(np.average(( rhores[-1][l:r,l:r]*buffer_mask)**2)) )
momxres_L2.append( np.sqrt(np.average((momxres[-1][l:r,l:r]*buffer_mask)**2)) )
momyres_L2.append( np.sqrt(np.average((momyres[-1][l:r,l:r]*buffer_mask)**2)) )
enres_L2. append( np.sqrt(np.average(( enres[-1][l:r,l:r]*buffer_mask)**2)) )
rhores_L2 = np.array( rhores_L2)
momxres_L2 = np.array(momxres_L2)
momyres_L2 = np.array(momyres_L2)
enres_L2 = np.array( enres_L2)
t = np.array(t)
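# Illustrative follow-up (not part of the original analysis): plot the L2 norm
# of each residual against checkpoint time to see how closely the checkpoints
# satisfy the discretized equations. Assumes matplotlib is importable; t and
# the *_L2 arrays above have one entry per analyzed checkpoint.
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.semilogy(t, rhores_L2, label='mass')
ax.semilogy(t, momxres_L2, label='x-momentum')
ax.semilogy(t, momyres_L2, label='y-momentum')
ax.semilogy(t, enres_L2, label='energy')
ax.set_xlabel('time')
ax.set_ylabel('L2 residual')
ax.legend()
fig.savefig('residuals.png')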
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'password'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Butter')
        res = self.client.get(INGREDIENTS_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'test2@test.com',
'password'
)
Ingredient.objects.create(user=user2, name='Vinegar')
        ingredient = Ingredient.objects.create(user=self.user, name='Turmeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test create a new ingredient"""
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_ingredients_assigned_to_recipes(self):
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apples'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple pie',
time_minutes=5,
price=4,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name='Cheese')
recipe1 = Recipe.objects.create(
title='Eggs benedict',
time_minutes=5,
price=7.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=15,
price=4.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
|
# -*- coding: utf-8 -*-
import os
import json
from splash import defaults
from splash.utils import to_bytes, path_join_secure
from splash.errors import BadOption
class RenderOptions(object):
"""
Options that control how to render a response.
"""
_REQUIRED = object()
def __init__(self, data, max_timeout):
self.data = data
self.max_timeout = max_timeout
@classmethod
def raise_error(cls, argument, description, type='bad_argument', **kwargs):
params = {
'type': type,
'argument': argument,
'description': description
}
params.update(kwargs)
raise BadOption(params)
@classmethod
def fromrequest(cls, request, max_timeout):
"""
Initialize options from a Twisted Request.
"""
# 1. GET / POST data
data = {key.decode('utf-8'): values[0].decode('utf-8') for key, values
in request.args.items()}
if request.method == b'POST':
content_type = request.getHeader(b'content-type')
if content_type:
request.content.seek(0)
# 2. application/json POST data
if b'application/json' in content_type:
try:
content = request.content.read().decode('utf-8')
                        data.update(json.loads(content))
except ValueError as e:
raise BadOption({
'type': 'invalid_json',
'description': "Can't decode JSON",
'message': str(e),
})
# 3. js_source from application/javascript POST requests
if b'application/javascript' in content_type:
data['js_source'] = request.content.read().decode('utf-8')
request.content.seek(0)
data['uid'] = id(request)
return cls(data, max_timeout)
def get_expired_args(self, cache):
"""
Return a list of argument names from load_args which can't be loaded
"""
return cache.get_missing(self.get_load_args().items())
def save_args_to_cache(self, cache):
"""
Process save_args and put all values to cache.
Return a list of (name, key) pairs.
"""
save_args = self.get_save_args()
save_values = [self.data.get(name) for name in save_args]
keys = cache.add_many(save_values)
return list(zip(save_args, keys))
def load_cached_args(self, cache):
load_args = self.get_load_args()
for name, key in (load_args or {}).items():
self.data[name] = cache[key]
def get(self, name, default=_REQUIRED, type=str, range=None):
value = self.data.get(name)
if value is not None:
if type is not None:
try:
value = type(value)
except ValueError:
msg = "Argument %r has a wrong type" % (name,)
self.raise_error(name, msg, required_type=type.__name__)
if range is not None and not (range[0] <= value <= range[1]):
self.raise_error(name, 'Argument is out of the allowed range',
min=range[0], max=range[1], value=value)
return value
elif default is self._REQUIRED:
self.raise_error(name, 'Required argument is missing: %s' % name,
type='argument_required')
else:
return default
def _get_bool(self, name, default=_REQUIRED):
return self.get(name, default, type=int, range=(0, 1))
def _get_url(self, name, default=_REQUIRED):
url = self.get(name, default, type=None)
if isinstance(url, bytes):
url = url.decode('utf8')
return url
def get_uid(self):
return self.get('uid')
def get_url(self):
return self._get_url("url")
def get_baseurl(self):
return self._get_url("baseurl", default=None)
def get_wait(self):
return self.get("wait", defaults.WAIT_TIME, type=float,
range=(0, self.get_timeout()))
def get_timeout(self):
default = min(self.max_timeout, defaults.TIMEOUT)
return self.get("timeout", default, type=float,
range=(0, self.max_timeout))
def get_resource_timeout(self):
return self.get("resource_timeout", defaults.RESOURCE_TIMEOUT,
type=float, range=(0, 1e6))
def get_response_body(self):
return self._get_bool("response_body", defaults.RESPONSE_BODY_ENABLED)
def get_request_body(self):
return self._get_bool("request_body", defaults.REQUEST_BODY_ENABLED)
def get_images(self):
return self._get_bool("images", defaults.AUTOLOAD_IMAGES)
def get_proxy(self):
return self.get("proxy", default=None)
def get_js_source(self):
return self.get("js_source", default=None)
def get_width(self):
return self.get("width", None, type=int, range=(1, defaults.MAX_WIDTH))
def get_height(self):
return self.get("height", None, type=int,
range=(1, defaults.MAX_HEIGTH))
def get_scale_method(self):
scale_method = self.get("scale_method", defaults.IMAGE_SCALE_METHOD)
allowed_scale_methods = ['raster', 'vector']
if scale_method not in allowed_scale_methods:
self.raise_error(
argument='scale_method',
description="Invalid 'scale_method': %s" % scale_method,
allowed=allowed_scale_methods,
received=scale_method,
)
return scale_method
def get_quality(self):
return self.get("quality", defaults.JPEG_QUALITY, type=int, range=(0, 100))
def get_http_method(self):
method = self.get("http_method", "GET")
if method.upper() not in ["POST", "GET"]:
self.raise_error("http_method", "Unsupported HTTP method {}".format(method))
return method
def get_body(self):
body = self.get("body", None, to_bytes)
method = self.get("http_method", "GET").upper()
if method == 'GET' and body:
self.raise_error("body", "GET request should not have a body")
return body
def get_render_all(self, wait=None):
result = self._get_bool("render_all", False)
if result == 1 and wait == 0:
self.raise_error("render_all",
"Pass non-zero 'wait' to render full webpage")
return result
def get_lua_source(self):
return self.get("lua_source")
def get_js_profile(self, js_profiles_path):
js_profile = self.get("js", default=None)
if not js_profile:
return js_profile
if js_profiles_path is None:
self.raise_error('js',
'Javascript profiles are not enabled on server')
try:
profile_dir = path_join_secure(js_profiles_path, js_profile)
except ValueError as e:
# security check fails
print(e)
self.raise_error('js', 'Javascript profile does not exist')
if not os.path.isdir(profile_dir):
self.raise_error('js', 'Javascript profile does not exist')
return profile_dir
def get_headers(self):
headers = self.get("headers", default=None, type=None)
if headers is None:
return headers
if not isinstance(headers, (list, tuple, dict)):
self.raise_error(
argument='headers',
description="'headers' must be either a JSON array of "
"(name, value) pairs or a JSON object"
)
if isinstance(headers, (list, tuple)):
for el in headers:
string_only = all(isinstance(e, str) for e in el)
if not (isinstance(el, (list, tuple)) and len(el) == 2 and string_only):
self.raise_error(
argument='headers',
description="'headers' must be either a JSON array of "
"(name, value) pairs or a JSON object"
)
return headers
def get_save_args(self):
save_args = self.get("save_args", default=None, type=None)
if save_args is None:
return []
if isinstance(save_args, str):
# comma-separated string
save_args = save_args.split(',')
if not isinstance(save_args, list):
self.raise_error(
argument="save_args",
description="'save_args' should be either a comma-separated "
"string or a JSON array with argument names",
)
# JSON array
if not all(isinstance(a, str) for a in save_args):
self.raise_error(
argument="save_args",
description="'save_args' should be a list of strings",
)
return save_args
def get_load_args(self):
load_args = self.get("load_args", default=None, type=None)
if load_args is None:
return {}
if isinstance(load_args, str):
try:
load_args = dict(
kv.split("=", 1) for kv in load_args.split(';')
)
except ValueError:
self.raise_error(
argument="load_args",
description="'load_args' string value is not a "
"semicolon-separated list of name=hash pairs"
)
if not isinstance(load_args, dict):
self.raise_error(
argument="load_args",
description="'load_args' should be either a JSON object with "
"argument hashes or a semicolon-separated list "
"of name=hash pairs"
)
return load_args
def get_viewport(self, wait=None):
viewport = self.get("viewport", defaults.VIEWPORT_SIZE)
if viewport == 'full':
if wait == 0:
self.raise_error("viewport",
"Pass non-zero 'wait' to render full webpage")
else:
try:
validate_size_str(viewport)
except ValueError as e:
self.raise_error("viewport", str(e))
return viewport
def get_filters(self, pool=None, adblock_rules=None):
filter_names = self.get('filters', '')
filter_names = [f for f in filter_names.split(',') if f]
if pool is None and adblock_rules is None: # skip validation
return filter_names
if not filter_names:
return filter_names
if pool is not None:
adblock_rules = pool.network_manager_factory.adblock_rules
if adblock_rules is None:
self.raise_error(
"filters",
"Invalid filter names: %s" % (filter_names,)
)
if adblock_rules is not None:
unknown_filters = adblock_rules.get_unknown_filters(filter_names)
if unknown_filters:
self.raise_error(
"filters",
"Invalid filter names: %s" % (unknown_filters,)
)
return filter_names
def get_allowed_domains(self):
allowed_domains = self.get("allowed_domains", default=None)
if allowed_domains is not None:
return allowed_domains.split(',')
def get_allowed_content_types(self):
content_types = self.get("allowed_content_types", default=['*'])
if isinstance(content_types, str):
content_types = list(filter(None, content_types.split(',')))
return content_types
def get_forbidden_content_types(self):
content_types = self.get("forbidden_content_types", default=[])
if isinstance(content_types, str):
content_types = list(filter(None, content_types.split(',')))
return content_types
def get_html5_media(self):
return self._get_bool("html5_media", defaults.HTML5_MEDIA_ENABLED)
def get_engine(self, browser_engines_enabled=None):
engine = self.get("engine", default="webkit", type=str)
if engine not in {"webkit", "chromium"}:
self.raise_error("engine", "Unknown render engine {}".format(engine))
if browser_engines_enabled is not None:
if engine not in browser_engines_enabled:
self.raise_error("engine", "Disabled render engine {}".format(engine))
return engine
def get_http2(self):
engine = self.get_engine()
if self.get_engine() == "webkit":
default = defaults.WEBKIT_HTTP2_ENABLED
else:
assert engine == 'chromium'
default = defaults.CHROMIUM_HTTP2_ENABLED
return self._get_bool("http2", default)
def get_common_params(self, js_profiles_path):
wait = self.get_wait()
return {
'url': self.get_url(),
'baseurl': self.get_baseurl(),
'wait': wait,
'resource_timeout': self.get_resource_timeout(),
'viewport': self.get_viewport(wait),
'render_all': self.get_render_all(wait),
'images': self.get_images(),
'headers': self.get_headers(),
'proxy': self.get_proxy(),
'js_profile': self.get_js_profile(js_profiles_path),
'js_source': self.get_js_source(),
'http_method': self.get_http_method(),
'body': self.get_body(),
'html5_media': self.get_html5_media(),
'http2': self.get_http2(),
# 'lua': self.get_lua(),
}
def get_image_params(self):
return {
'width': self.get_width(),
'height': self.get_height(),
'scale_method': self.get_scale_method()
}
def get_png_params(self):
return self.get_image_params()
def get_jpeg_params(self):
params = {'quality': self.get_quality()}
params.update(self.get_image_params())
return params
def get_include_params(self):
return dict(
html=self._get_bool("html", defaults.DO_HTML),
iframes=self._get_bool("iframes", defaults.DO_IFRAMES),
png=self._get_bool("png", defaults.DO_PNG),
jpeg=self._get_bool("jpeg", defaults.DO_JPEG),
script=self._get_bool("script", defaults.SHOW_SCRIPT),
console=self._get_bool("console", defaults.SHOW_CONSOLE),
history=self._get_bool("history", defaults.SHOW_HISTORY),
har=self._get_bool("har", defaults.SHOW_HAR),
)
def validate_size_str(size_str):
"""
Validate size string in WxH format.
Can be used to validate both viewport and window size strings. Does not
special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes
wrong.
:param size_str: string to validate
"""
max_width = defaults.VIEWPORT_MAX_WIDTH
max_heigth = defaults.VIEWPORT_MAX_HEIGTH
max_area = defaults.VIEWPORT_MAX_AREA
try:
w, h = map(int, size_str.split('x'))
except ValueError:
raise ValueError("Invalid viewport format: %s" % size_str)
else:
if not ((0 < w <= max_width) and (0 < h <= max_heigth) and
(w * h < max_area)):
raise ValueError("Viewport is out of range (%dx%d, area=%d)" %
(max_width, max_heigth, max_area))
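# Usage sketch (illustrative): validate_size_str accepts "WxH" strings within
# the configured limits and raises ValueError otherwise; the special 'full'
# viewport is handled by get_viewport, not here.
#
#     validate_size_str('1024x768')   # OK under the default limits
#     validate_size_str('full')       # raises ValueError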
|
from singleton.singleton import Singleton
@Singleton
class Config(object):
    def __init__(self, vars=None):
        # Avoid the shared-mutable-default pitfall of `vars=[]`.
        self.vars = vars if vars is not None else []

    @staticmethod
    def get_vars():
        return Config.instance().vars
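# Usage sketch (illustrative; assumes the `singleton` package's `.instance()`,
# used above, forwards constructor arguments on first call):
#
#     Config.instance(vars=['a', 'b'])
#     Config.get_vars()  # -> ['a', 'b']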
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def inorderSuccessor(self, root: 'TreeNode', p: 'TreeNode') -> 'TreeNode':
def inorder(node):
            if node is None:
                return []
return inorder(node.left) + [node.val] + inorder(node.right)
L = inorder(root)
        for i in range(len(L) - 1):
            if L[i] == p.val:
                # Note: this returns a fresh node holding the successor's
                # value, not the successor node itself.
                return TreeNode(L[i + 1])
        return None
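# Alternative sketch (illustrative, assuming `root` is a binary *search* tree,
# as in the usual statement of this problem): track the most recent node whose
# value exceeds p.val while descending -- O(h) time, O(1) space, and it
# returns the existing successor node rather than a copy.
#
#     def inorder_successor_bst(root, p):
#         succ = None
#         while root:
#             if p.val < root.val:
#                 succ = root
#                 root = root.left
#             else:
#                 root = root.right
#         return succ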
|
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse  # Python 2
import gevent
import os
import socket
import traceback
import boto
from . import calling_format
from wal_e import files
from wal_e import log_help
from wal_e.exception import UserException
from wal_e.pipeline import get_download_pipeline
from wal_e.piper import PIPE
from wal_e.retries import retry, retry_with_count
logger = log_help.WalELogger(__name__)
# Set a timeout for boto HTTP operations should no timeout be set.
# Yes, in the case the user *wanted* no timeouts, this would set one.
# If that becomes a problem, someone should post a bug, although I am
# having a hard time imagining why that behavior could ever be useful.
if not boto.config.has_option('Boto', 'http_socket_timeout'):
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'http_socket_timeout', '30')
def _uri_to_key(creds, uri, conn=None):
assert uri.startswith('s3://')
url_tup = urlparse(uri)
bucket_name = url_tup.netloc
cinfo = calling_format.from_store_name(bucket_name)
if conn is None:
conn = cinfo.connect(creds)
bucket = boto.s3.bucket.Bucket(connection=conn, name=bucket_name)
return boto.s3.key.Key(bucket=bucket, name=url_tup.path)
def uri_put_file(creds, uri, fp, content_encoding=None, conn=None):
    # As of Boto 2.2.2, boto only reads from the current file position
    # to the end.  This can manifest as successfully uploaded *empty*
    # keys in S3 instead of the intended data, because of how tempfiles
    # are used (create, fill, submit to boto).
#
# It is presumed it is the caller's responsibility to rewind the
# file position, and since the whole program was written with this
# in mind, assert it as a precondition for using this procedure.
assert fp.tell() == 0
k = _uri_to_key(creds, uri, conn=conn)
if content_encoding is not None:
k.content_type = content_encoding
k.set_contents_from_file(fp, encrypt_key=True)
return k
def uri_get_file(creds, uri, conn=None):
k = _uri_to_key(creds, uri, conn=conn)
return k.get_contents_as_string()
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
"""
Get and decompress a S3 URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
"""
assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
def standard_detail_message(prefix=''):
return (prefix + ' There have been {n} attempts to fetch wal '
'file {url} so far.'.format(n=exc_processor_cxt, url=url))
typ, value, tb = exc_tup
del exc_tup
# Screen for certain kinds of known-errors to retry from
if issubclass(typ, socket.error):
socketmsg = value[1] if isinstance(value, tuple) else value
logger.info(
msg='Retrying fetch because of a socket error',
detail=standard_detail_message(
"The socket error's message is '{0}'."
.format(socketmsg)))
elif (issubclass(typ, boto.exception.S3ResponseError) and
value.error_code == 'RequestTimeTooSkewed'):
logger.info(msg='Retrying fetch because of a Request Skew time',
detail=standard_detail_message())
else:
# For all otherwise untreated exceptions, report them as a
# warning and retry anyway -- all exceptions that can be
# justified should be treated and have error messages
# listed.
logger.warning(
msg='retrying WAL file fetch from unexpected exception',
detail=standard_detail_message(
'The exception type is {etype} and its value is '
'{evalue} and its traceback is {etraceback}'
.format(etype=typ, evalue=value,
etraceback=''.join(traceback.format_tb(tb)))))
# Help Python GC by resolving possible cycles
del tb
def download():
with files.DeleteOnError(path) as decomp_out:
key = _uri_to_key(creds, url)
with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
g = gevent.spawn(write_and_return_error, key, pl.stdin)
try:
# Raise any exceptions from write_and_return_error
exc = g.get()
if exc is not None:
raise exc
                except boto.exception.S3ResponseError as e:
if e.status == 404:
# Do not retry if the key not present, this
# can happen under normal situations.
pl.abort()
logger.warning(
msg=('could no longer locate object while '
'performing wal restore'),
detail=('The absolute URI that could not be '
'located is {url}.'.format(url=url)),
hint=('This can be normal when Postgres is trying '
'to detect what timelines are available '
'during restoration.'))
decomp_out.remove_regardless = True
return False
                    elif e.error_code == 'ExpiredToken':
# Do not retry if STS token has expired. It can never
# succeed in the future anyway.
pl.abort()
logger.info(
msg=('could no longer authenticate while '
'performing wal restore'),
detail=('The absolute URI that could not be '
'accessed is {url}.'.format(url=url)),
hint=('This can be normal when using STS '
'credentials.'))
decomp_out.remove_regardless = True
return False
else:
logger.warning(msg='S3 response error',
detail='The error is: {0}, {1}'
.format(e.error_code, e.error_message))
raise
logger.info(
msg='completed download and decompression',
detail='Downloaded and decompressed "{url}" to "{path}"'
.format(url=url, path=path))
return True
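    # `retry(...)` builds a decorator; applying it by hand here means the
    # undecorated `download` runs exactly once when do_retry is False.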
if do_retry:
download = retry(
retry_with_count(log_wal_fetch_failures_on_error))(download)
return download()
def sigv4_check_apply():
# Insist that one of AWS_REGION or WALE_S3_ENDPOINT is defined.
# The former is for authenticating correctly with AWS SigV4.
#
# The latter is for alternate implementations that are
# S3-interface compatible. Many, or most, of these do not support
# AWS SigV4 at all and none are known to require SigV4 (and
# instead use the non-region-demanding SigV2), so simplify by
# relaxing the AWS_REGION requirement in that case.
region = os.getenv('AWS_REGION')
endpoint = os.getenv('WALE_S3_ENDPOINT')
if region and endpoint:
logger.warning(msg='WALE_S3_ENDPOINT defined, ignoring AWS_REGION',
hint='AWS_REGION is only intended for use with AWS S3, '
'and not interface-compatible use cases supported by '
'WALE_S3_ENDPOINT')
elif region and not endpoint:
# Normal case for an AWS user: Set up SigV4, which can only be
# enacted globally.
if not boto.config.has_option('s3', 'use-sigv4'):
if not boto.config.has_section('s3'):
boto.config.add_section('s3')
boto.config.set('s3', 'use-sigv4', 'True')
elif not region and endpoint:
# Normal case for a S3-interface-compatible user, e.g. RADOS
# or Deis users. SigV4 doesn't have the same level of uptake
# on those implementations.
pass
elif not region and not endpoint:
raise UserException(
msg='must define one of AWS_REGION or WALE_S3_ENDPOINT',
hint=(
'AWS users will want to set AWS_REGION, those using '
'alternative S3-compatible systems will want to use '
'WALE_S3_ENDPOINT.'
)
)
else:
# Entire Cartesian product should be handled.
assert False
def write_and_return_error(key, stream):
try:
key.get_contents_to_file(stream)
stream.flush()
    except Exception as e:
return e
finally:
stream.close()
|
""" Contains all the custom exceptions used. """
class DirectoryAccessError(Exception):
""" Exception to be raised when the directory can't be accessed. """
pass
class DirectoryCreateError(Exception):
""" Exception to be raised when the directory can't be created. """
pass
class ImageDownloadError(Exception):
""" Exception to be raised when the image can't be downloaded. """
status_code = 0
def __init__(self, status_code=0):
super(ImageDownloadError, self).__init__()
self.status_code = status_code
class ImageSizeError(Exception):
""" Exception to be raised when the image is over the file size. """
image_size = 0
def __init__(self, image_size):
super(ImageSizeError, self).__init__()
self.image_size = image_size
class PageLoadError(Exception):
""" Exception to be raised when the page can't be loaded. """
status_code = 0
def __init__(self, status_code):
super(PageLoadError, self).__init__()
self.status_code = status_code
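# Usage sketch (illustrative; `download_image` is a hypothetical caller):
#
#     try:
#         download_image(url)
#     except ImageDownloadError as err:
#         print('download failed with HTTP status', err.status_code)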
|
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__) >= parse_version("36.2")
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=["="])
config.read("settings.ini")
cfg = config["DEFAULT"]
cfg_keys = "version description keywords author author_email".split()
expected = (
cfg_keys
+ "lib_name user branch license status min_python audience language".split()
)
for o in expected:
assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o: cfg[o] for o in cfg_keys}
licenses = {
"apache2": (
"Apache Software License 2.0",
"OSI Approved :: Apache Software License",
),
}
statuses = [
"1 - Planning",
"2 - Pre-Alpha",
"3 - Alpha",
"4 - Beta",
"5 - Production/Stable",
"6 - Mature",
"7 - Inactive",
]
py_versions = (
"2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8".split()
)
extras = {}
requirements = [ln for ln in open("requirements.txt").read().splitlines() if ln]
lic = licenses[cfg["license"]]
min_python = cfg["min_python"]
setuptools.setup(
name=cfg["lib_name"],
license=lic[0],
classifiers=[
"Development Status :: " + statuses[int(cfg["status"])],
"Intended Audience :: " + cfg["audience"].title(),
"License :: " + lic[1],
"Natural Language :: " + cfg["language"].title(),
]
+ [
"Programming Language :: Python :: " + o
for o in py_versions[py_versions.index(min_python) :]
],
url=cfg["git_url"],
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=requirements,
extras_require=extras,
dependency_links=cfg.get("dep_links", "").split(),
python_requires=">=" + cfg["min_python"],
long_description=open("README.md", encoding="utf-8", errors="ignore").read(),
long_description_content_type="text/markdown",
zip_safe=False,
entry_points={"console_scripts": cfg.get("console_scripts", "").split()},
**setup_cfg
)
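# Illustrative settings.ini shape this script expects (placeholder values;
# note `git_url` is read at setup() time even though it is not in `expected`):
#
#     [DEFAULT]
#     lib_name = example
#     version = 0.0.1
#     description = An example package
#     keywords = example
#     author = Jane Doe
#     author_email = jane@example.com
#     user = example-org
#     branch = master
#     license = apache2
#     status = 2
#     min_python = 3.6
#     audience = developers
#     language = English
#     git_url = https://github.com/example-org/example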
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
def save_figure_to_numpy(fig):
    # Render the canvas to an RGB byte buffer and wrap it as a numpy array.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment(alignment, fn):
# [4, encoder_step, decoder_step]
fig, axes = plt.subplots(2, 2)
for i in range(2):
for j in range(2):
g = axes[i][j].imshow(alignment[i*2+j,:,:].T,
aspect='auto', origin='lower',
interpolation='none')
plt.colorbar(g, ax=axes[i][j])
plt.savefig(fn)
plt.close()
return fn
def plot_alignment_to_numpy(alignment, info=None):
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_spectrogram_to_numpy(spectrogram):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(list(range(len(gate_targets))), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(list(range(len(gate_outputs))), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
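# Minimal usage sketch (synthetic data, illustrative only):
#
#     import numpy as np
#     spec = np.random.rand(80, 200)          # [channels, frames]
#     img = plot_spectrogram_to_numpy(spec)   # -> (H, W, 3) uint8 array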
|
#! /usr/bin/env python
from setuptools import setup, Command
from subprocess import check_call
from distutils.spawn import find_executable
import cpplint
class Cmd(Command):
'''
Superclass for other commands to run via setup.py, declared in setup.cfg.
These commands will auto-install setup_requires in a temporary folder.
'''
user_options = [
('executable', 'e', 'The executable to use for the command')
]
def initialize_options(self):
self.executable = find_executable(self.executable)
def finalize_options(self):
pass
def execute(self, *k):
check_call((self.executable,) + k)
class Lint(Cmd):
'''run with python setup.py lint'''
description = 'Run linting of the code'
user_options = Cmd.user_options + [
('jobs', 'j', 'Use multiple processes to speed up the linting')
]
executable = 'pylint'
def run(self):
self.execute('cpplint.py')
# some pip versions bark on comments (e.g. on travis)
def read_without_comments(filename):
with open(filename) as f:
        return [line for line in f.read().splitlines()
                if len(line) > 0 and not line.startswith('#')]
test_required = read_without_comments('test-requirements')
setup(name='cpplint',
version=cpplint.__VERSION__,
py_modules=['cpplint'],
# generate platform specific start script
entry_points={
'console_scripts': [
'cpplint = cpplint:main'
]
},
install_requires=[],
url='https://github.com/cpplint/cpplint',
download_url='https://github.com/cpplint/cpplint',
keywords=['lint', 'python', 'c++'],
maintainer='cpplint Developers',
maintainer_email='see_github@nospam.com',
classifiers=['Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: C++',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Topic :: Software Development :: Quality Assurance',
'License :: Freely Distributable'],
description='Automated checker to ensure C++ files follow Google\'s style guide',
long_description=open('README.rst').read(),
license='BSD-3-Clause',
setup_requires=[
"pytest-runner"
],
tests_require=test_required,
# extras_require allow pip install .[dev]
extras_require={
'test': test_required,
'dev': read_without_comments('dev-requirements') + test_required
},
cmdclass={
'lint': Lint
})
|
import sys
sys.path.append('..')
from helpers import render_frames
from graphs.PathTracer import PathTracer as g
from falcor import *
m.addGraph(g)
m.loadScene('Arcade/Arcade.pyscene')
# default
render_frames(m, 'default', frames=[128])
exit()
|
import numpy
from exojax.spec.rtransfer import nugrid
from exojax.spec import AutoXS
from exojax.spec import AutoRT
import matplotlib.pyplot as plt
if False:
nus=numpy.logspace(numpy.log10(1900.0),numpy.log10(2300.0),160000,dtype=numpy.float64)
#nus=numpy.logspace(numpy.log10(2041.6),numpy.log10(2041.7),10000,dtype=numpy.float64)
#nus=numpy.logspace(numpy.log10(2040),numpy.log10(2043),10000,dtype=numpy.float64)
#MODIT worse for higher-T and/or lower-P
T=2000.0
P=1.0
autoxs=AutoXS(nus,"ExoMol","CO",xsmode="LPF")
xsv0=autoxs.xsection(T,P)
autoxs=AutoXS(nus,"ExoMol","CO",xsmode="MODIT",autogridconv=False)
xsv1=autoxs.xsection(T,P)
autoxs=AutoXS(nus,"ExoMol","CO",xsmode="DIT")
xsv2=autoxs.xsection(T,P)
fig=plt.figure()
ax=fig.add_subplot(211)
plt.plot(nus,xsv0,label="LPF",color="C0",alpha=0.4)
plt.plot(nus,xsv1,label="MODIT",color="C1",alpha=0.4)
plt.plot(nus,xsv2,label="DIT",color="C2",alpha=0.4)
plt.legend(loc="upper right")
plt.plot(nus,xsv1-xsv0,color="C1",alpha=0.4)
plt.plot(nus,xsv2-xsv0,color="C2",alpha=0.4)
ax=fig.add_subplot(212)
plt.plot(nus,xsv2-xsv0,label="DIT-LPF",color="C2",alpha=0.4)
plt.plot(nus,xsv1-xsv0,label="MODIT-LPF",color="C1",alpha=0.7)
plt.legend(loc="upper right")
plt.show()
nusobs=numpy.linspace(1900.0,2300.0,10000,dtype=numpy.float64)
xsmode="MODIT"
nus,wav,res=nugrid(1900.0,2300.0,160000,"cm-1",xsmode=xsmode)
Parr=numpy.logspace(-8,2,100) #100 layers from 10^-8 bar to 10^2 bar
Tarr = 500.*(Parr/Parr[-1])**0.02
autort=AutoRT(nus,1.e5,2.33,Tarr,Parr,xsmode=xsmode,autogridconv=False) #g=1.e5 cm/s2, mmw=2.33
autort.addcia("H2-H2",0.74,0.74) #CIA, mmr(H)=0.74
autort.addmol("ExoMol","CO",0.01) #CO line, mmr(CO)=0.01
F1=autort.rtrun()
#F1o=autort.spectrum(nusobs,100000.0,20.0,0.0)
xsmode="DIT"
nus_,wav,res=nugrid(1900.0,2300.0,160000,"cm-1",xsmode=xsmode)
Parr=numpy.logspace(-8,2,100) #100 layers from 10^-8 bar to 10^2 bar
Tarr = 500.*(Parr/Parr[-1])**0.02
autort=AutoRT(nus_,1.e5,2.33,Tarr,Parr,xsmode=xsmode) #g=1.e5 cm/s2, mmw=2.33
autort.addcia("H2-H2",0.74,0.74) #CIA, mmr(H)=0.74
autort.addmol("ExoMol","CO",0.01) #CO line, mmr(CO)=0.01
F2=autort.rtrun()
#F2o=autort.spectrum(nusobs,100000.0,20.0,0.0)
xsmode="LPF"
nus,wav,res=nugrid(1900.0,2300.0,160000,"cm-1",xsmode=xsmode)
Parr=numpy.logspace(-8,2,100) #100 layers from 10^-8 bar to 10^2 bar
Tarr = 500.*(Parr/Parr[-1])**0.02
autort=AutoRT(nus,1.e5,2.33,Tarr,Parr,xsmode=xsmode) #g=1.e5 cm/s2, mmw=2.33
autort.addcia("H2-H2",0.74,0.74) #CIA, mmr(H)=0.74
autort.addmol("ExoMol","CO",0.01) #CO line, mmr(CO)=0.01
F0=autort.rtrun()
#F0o=autort.spectrum(nusobs,100000.0,20.0,0.0)
fig=plt.figure()
ax=fig.add_subplot(211)
plt.plot(nus,F0,label="LPF",color="C0",alpha=0.4)
plt.plot(nus,F1,label="MODIT",color="C1",alpha=0.4)
plt.plot(nus_,F2,label="DIT",color="C2",ls="dotted",alpha=0.4)
plt.plot(nus,F1-F0,color="C1",alpha=0.4)
plt.legend(loc="upper right")
ax=fig.add_subplot(212)
plt.plot(nus,F1-F0,label="MODIT-LPF",color="C1",alpha=0.4)
plt.legend(loc="upper right")
plt.show()
|
"""This module contains the general information for StorageControllerReference ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageControllerReferenceConsts:
CONTROLLER_TYPE_FLASH = "FLASH"
CONTROLLER_TYPE_HBA = "HBA"
CONTROLLER_TYPE_M2 = "M2"
CONTROLLER_TYPE_NVME = "NVME"
CONTROLLER_TYPE_PCH = "PCH"
CONTROLLER_TYPE_PT = "PT"
CONTROLLER_TYPE_SAS = "SAS"
CONTROLLER_TYPE_SATA = "SATA"
CONTROLLER_TYPE_SD = "SD"
CONTROLLER_TYPE_EXTERNAL = "external"
CONTROLLER_TYPE_UNKNOWN = "unknown"
class StorageControllerReference(ManagedObject):
"""This is StorageControllerReference class."""
consts = StorageControllerReferenceConsts()
naming_props = set(['referencedRn'])
mo_meta = MoMeta("StorageControllerReference", "storageControllerReference", "controller-ref-[referenced_rn]", VersionMeta.Version321d, "InputOutput", 0x3f, [], ["read-only"], ['storageMiniStorage'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version321d, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"controller_id": MoPropertyMeta("controller_id", "controllerId", "ushort", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"controller_type": MoPropertyMeta("controller_type", "controllerType", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, ["FLASH", "HBA", "M2", "NVME", "PCH", "PT", "SAS", "SATA", "SD", "external", "unknown"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"referenced_rn": MoPropertyMeta("referenced_rn", "referencedRn", "string", VersionMeta.Version321d, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"controllerId": "controller_id",
"controllerType": "controller_type",
"dn": "dn",
"referencedRn": "referenced_rn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, referenced_rn, **kwargs):
self._dirty_mask = 0
self.referenced_rn = referenced_rn
self.child_action = None
self.controller_id = None
self.controller_type = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "StorageControllerReference", parent_mo_or_dn, **kwargs)
|
from bakujobs.models import Job, Category, Description
from froala_editor.widgets import FroalaEditor
from django import forms
class JobCreate(forms.ModelForm):
class Meta:
model = Job
#fields = ('job_title', 'company_name', 'category', 'job_description', 'job_type', 'location', 'description', 'website', 'email', 'phone', 'min_salary', 'max_salary', 'gender')
fields = ('job_title', 'category', 'job_description', 'job_type', 'description', 'min_salary', 'max_salary', 'gender')
def __init__(self, *args, **kwargs):
# self.user = kwargs.pop('user')
super(JobCreate, self).__init__(*args, **kwargs)
self.fields['job_description'].queryset = Description.objects.none()
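        # Dependent-dropdown pattern: start from an empty queryset, then
        # narrow it below, either from the submitted 'category' (AJAX
        # re-render) or from the instance's category when editing an
        # existing Job.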
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
if 'category' in self.data:
try:
category_id = int(self.data.get('category'))
self.fields['job_description'].queryset = Description.objects.filter(category_name_id=category_id).order_by('name')
except (ValueError, TypeError):
pass
elif self.instance.pk:
self.fields['job_description'].queryset = self.instance.category.description_set.order_by('name')
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
data = _LegacyUnpickler(f).load()
# Legacy TensorFlow pickle => convert.
if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
tf_G, tf_D, tf_Gs = data
G = convert_tf_generator(tf_G)
D = convert_tf_discriminator(tf_D)
G_ema = convert_tf_generator(tf_Gs)
data = dict(G=G, D=D, G_ema=G_ema)
# Add missing fields.
if 'training_set_kwargs' not in data:
data['training_set_kwargs'] = None
if 'augment_pipe' not in data:
data['augment_pipe'] = None
# Validate contents.
assert isinstance(data['G'], torch.nn.Module)
assert isinstance(data['D'], torch.nn.Module)
assert isinstance(data['G_ema'], torch.nn.Module)
assert isinstance(data['training_set_kwargs'], (dict, type(None)))
assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
# Force FP16.
if force_fp16:
for key in ['G', 'D', 'G_ema']:
old = data[key]
kwargs = copy.deepcopy(old.init_kwargs)
if key.startswith('G'):
kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {}))
kwargs.synthesis_kwargs.num_fp16_res = 4
kwargs.synthesis_kwargs.conv_clamp = 256
if key.startswith('D'):
kwargs.num_fp16_res = 4
kwargs.conv_clamp = 256
if kwargs != old.init_kwargs:
new = type(old)(**kwargs).eval().requires_grad_(False)
misc.copy_params_and_buffers(old, new, require_all=True)
data[key] = new
return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
pass
class _LegacyUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'dnnlib.tflib.network' and name == 'Network':
return _TFNetworkStub
return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
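    # `patterns` is a flat sequence of (regex, value_fn) pairs: even positions
    # are regexes matched against parameter/buffer names, odd positions are
    # callables producing the source array (None means leave the tensor as-is).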
for name, tensor in misc.named_params_and_buffers(module):
found = False
value = None
for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
match = re.fullmatch(pattern, name)
if match:
found = True
if value_fn is not None:
value = value_fn(*match.groups())
break
try:
assert found
if value is not None:
tensor.copy_(torch.from_numpy(np.array(value)))
        except:
            print(name, list(tensor.shape))  # aid debugging before re-raising
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
if tf_G.version < 4:
raise ValueError('TensorFlow pickle version too low')
# Collect kwargs.
tf_kwargs = tf_G.static_kwargs
known_kwargs = set()
def kwarg(tf_name, default=None, none=None):
known_kwargs.add(tf_name)
val = tf_kwargs.get(tf_name, default)
return val if val is not None else none
# Convert kwargs.
kwargs = dnnlib.EasyDict(
z_dim = kwarg('latent_size', 1024),
c_dim = kwarg('label_size', 0),
w_dim = kwarg('dlatent_size', 1024),
img_resolution = kwarg('resolution', 1024),
img_channels = kwarg('num_channels', 3),
mapping_kwargs = dnnlib.EasyDict(
num_layers = kwarg('mapping_layers', 4),
embed_features = kwarg('label_fmaps', None),
layer_features = kwarg('mapping_fmaps', None),
activation = kwarg('mapping_nonlinearity', 'lrelu'),
lr_multiplier = kwarg('mapping_lrmul', 0.01),
w_avg_beta = kwarg('w_avg_beta', 0.995, none=1),
),
synthesis_kwargs = dnnlib.EasyDict(
channel_base = kwarg('fmap_base', 32768) * 2,
channel_max = kwarg('fmap_max', 1024),
num_fp16_res = kwarg('num_fp16_res', 0),
conv_clamp = kwarg('conv_clamp', None),
architecture = kwarg('architecture', 'skip'),
resample_filter = kwarg('resample_kernel', [1,3,3,1]),
use_noise = kwarg('use_noise', True),
activation = kwarg('nonlinearity', 'lrelu'),
),
)
# Check for unknown kwargs.
kwarg('truncation_psi')
kwarg('truncation_cutoff')
kwarg('style_mixing_prob')
kwarg('structure')
kwarg('resolution_h')
kwarg('resolution_w')
unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
if len(unknown_kwargs) > 0:
raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
# Collect params.
tf_params = _collect_tf_params(tf_G)
for name, value in list(tf_params.items()):
match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
if match:
r = kwargs.img_resolution // (2 ** int(match.group(1)))
tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            kwargs.synthesis_kwargs.architecture = 'orig'
#for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
# Convert params.
from training import networks
G = networks.Generator(**kwargs).eval().requires_grad_(False)
# pylint: disable=unnecessary-lambda
_populate_module_params(G,
r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(),
r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(),
r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'],
r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0],
r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'],
r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
r'.*\.resample_filter', None,
)
return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
if tf_D.version < 4:
raise ValueError('TensorFlow pickle version too low')
# Collect kwargs.
tf_kwargs = tf_D.static_kwargs
known_kwargs = set()
def kwarg(tf_name, default=None):
known_kwargs.add(tf_name)
return tf_kwargs.get(tf_name, default)
# Convert kwargs.
kwargs = dnnlib.EasyDict(
c_dim = kwarg('label_size', 0),
img_resolution = kwarg('resolution', 1024),
img_channels = kwarg('num_channels', 3),
architecture = kwarg('architecture', 'resnet'),
channel_base = kwarg('fmap_base', 16384) * 2,
channel_max = kwarg('fmap_max', 512),
num_fp16_res = kwarg('num_fp16_res', 0),
conv_clamp = kwarg('conv_clamp', None),
cmap_dim = kwarg('mapping_fmaps', None),
block_kwargs = dnnlib.EasyDict(
activation = kwarg('nonlinearity', 'lrelu'),
resample_filter = kwarg('resample_kernel', [1,3,3,1]),
freeze_layers = kwarg('freeze_layers', 0),
),
mapping_kwargs = dnnlib.EasyDict(
num_layers = kwarg('mapping_layers', 0),
embed_features = kwarg('mapping_fmaps', None),
layer_features = kwarg('mapping_fmaps', None),
activation = kwarg('nonlinearity', 'lrelu'),
lr_multiplier = kwarg('mapping_lrmul', 0.1),
),
epilogue_kwargs = dnnlib.EasyDict(
mbstd_group_size = kwarg('mbstd_group_size', 32),
mbstd_num_channels = kwarg('mbstd_num_features', 4),
activation = kwarg('nonlinearity', 'lrelu'),
),
)
# Check for unknown kwargs.
kwarg('structure')
kwarg('resolution_h')
kwarg('resolution_w')
unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
if len(unknown_kwargs) > 0:
raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
# Collect params.
tf_params = _collect_tf_params(tf_D)
for name, value in list(tf_params.items()):
match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
if match:
r = kwargs.img_resolution // (2 ** int(match.group(1)))
tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
kwargs.architecture = 'orig'
#for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
# Convert params.
from training import networks
D = networks.Discriminator(**kwargs).eval().requires_grad_(False)
# pylint: disable=unnecessary-lambda
_populate_module_params(D,
r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
r'.*\.resample_filter', None,
)
return D
#----------------------------------------------------------------------------
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
"""Convert legacy network pickle into the native PyTorch format.
The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
Example:
\b
python legacy.py \\
--source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
--dest=stylegan2-cat-config-f.pkl
"""
print(f'Loading "{source}"...')
with dnnlib.util.open_url(source) as f:
data = load_network_pkl(f, force_fp16=force_fp16)
print(f'Saving "{dest}"...')
with open(dest, 'wb') as f:
pickle.dump(data, f)
print('Done.')
#----------------------------------------------------------------------------
if __name__ == "__main__":
convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
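# A hedged usage sketch of the converted pickle (paths are illustrative; assumes
# the stylegan2-ada-pytorch layout, where load_network_pkl() stores the
# exponential-moving-average generator under the 'G_ema' key):
#
#   import pickle, torch
#   with open('stylegan2-cat-config-f.pkl', 'rb') as f:
#       G = pickle.load(f)['G_ema']
#   z = torch.randn([1, G.z_dim])
#   img = G(z, None)  # c=None for an unconditional network; output in [-1, 1]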
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""Base class for Telegram Objects."""
try:
import ujson as json
except ImportError:
import json # type: ignore[no-redef]
import warnings
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, TypeVar
from telegram.utils.types import JSONDict
if TYPE_CHECKING:
from telegram import Bot
TO = TypeVar('TO', bound='TelegramObject', covariant=True)
class TelegramObject:
"""Base class for most telegram objects."""
_id_attrs: Tuple[object, ...] = ()
def __str__(self) -> str:
return str(self.to_dict())
def __getitem__(self, item: str) -> object:
return self.__dict__[item]
@staticmethod
def parse_data(data: Optional[JSONDict]) -> Optional[JSONDict]:
return None if data is None else data.copy()
@classmethod
def de_json(cls: Type[TO], data: Optional[JSONDict], bot: 'Bot') -> Optional[TO]:
data = cls.parse_data(data)
if data is None:
return None
if cls == TelegramObject:
return cls()
return cls(bot=bot, **data) # type: ignore[call-arg]
@classmethod
def de_list(cls: Type[TO], data: Optional[List[JSONDict]], bot: 'Bot') -> List[Optional[TO]]:
if not data:
return []
return [cls.de_json(d, bot) for d in data]
def to_json(self) -> str:
"""
Returns:
:obj:`str`
"""
return json.dumps(self.to_dict())
def to_dict(self) -> JSONDict:
data = dict()
for key in iter(self.__dict__):
if key == 'bot' or key.startswith('_'):
continue
value = self.__dict__[key]
if value is not None:
if hasattr(value, 'to_dict'):
data[key] = value.to_dict()
else:
data[key] = value
if data.get('from_user'):
data['from'] = data.pop('from_user', None)
return data
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
if self._id_attrs == ():
warnings.warn(
f"Objects of type {self.__class__.__name__} can not be meaningfully tested for"
" equivalence."
)
if other._id_attrs == ():
warnings.warn(
f"Objects of type {other.__class__.__name__} can not be meaningfully tested"
" for equivalence."
)
return self._id_attrs == other._id_attrs
return super().__eq__(other) # pylint: disable=no-member
def __hash__(self) -> int:
if self._id_attrs:
return hash((self.__class__, self._id_attrs)) # pylint: disable=no-member
return super().__hash__()
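# A minimal illustrative sketch (Ping is hypothetical, not a real telegram type)
# of the de_json / to_dict round trip and the _id_attrs-based equality above.
class Ping(TelegramObject):
    def __init__(self, id, payload=None, bot=None, **_kwargs):
        self.id = id
        self.payload = payload
        self.bot = bot  # excluded from to_dict() output by key name
        self._id_attrs = (self.id,)

ping = Ping.de_json({'id': 1, 'payload': 'pong'}, None)
assert ping.to_dict() == {'id': 1, 'payload': 'pong'}
assert ping == Ping(id=1, payload='different')  # equality compares _id_attrs only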
|
# pylint: disable=invalid-name,no-self-use
import pytest
import numpy
import torch
import torch.nn.init
from torch.nn.modules.rnn import LSTM
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import sort_batch_by_length
class TestAugmentedLSTM(AllenNlpTestCase):
def setUp(self):
super(TestAugmentedLSTM, self).setUp()
tensor = torch.rand([5, 7, 10])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 2:, :] = 0
tensor[3, 6:, :] = 0
tensor = torch.autograd.Variable(tensor)
sequence_lengths = torch.autograd.Variable(torch.LongTensor([3, 4, 2, 6, 7]))
self.random_tensor = tensor
self.sequence_lengths = sequence_lengths
def test_variable_length_sequences_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor, self.sequence_lengths)
tensor = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
lstm = AugmentedLstm(10, 11)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_variable_length_sequences_run_backward_return_correctly_padded_outputs(self):
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor, self.sequence_lengths)
tensor = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
lstm = AugmentedLstm(10, 11, go_forward=False)
output, _ = lstm(tensor)
output_sequence, _ = pad_packed_sequence(output, batch_first=True)
numpy.testing.assert_array_equal(output_sequence.data[1, 6:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[2, 4:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[3, 3:, :].numpy(), 0.0)
numpy.testing.assert_array_equal(output_sequence.data[4, 2:, :].numpy(), 0.0)
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
augmented_lstm = AugmentedLstm(10, 11)
pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
# Initialize all weights to be == 1.
initializer = InitializerApplicator([(".*", lambda tensor: torch.nn.init.constant(tensor, 1.))])
initializer(augmented_lstm)
initializer(pytorch_lstm)
initial_state = torch.autograd.Variable(torch.zeros([1, 5, 11]))
initial_memory = torch.autograd.Variable(torch.zeros([1, 5, 11]))
# Use bigger numbers to avoid floating point instability.
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor * 5., self.sequence_lengths)
lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
augmented_output, augmented_state = augmented_lstm(lstm_input, (initial_state, initial_memory))
pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)
numpy.testing.assert_array_almost_equal(pytorch_output_sequence.data.numpy(),
augmented_output_sequence.data.numpy(), decimal=4)
numpy.testing.assert_array_almost_equal(pytorch_state[0].data.numpy(),
augmented_state[0].data.numpy(), decimal=4)
numpy.testing.assert_array_almost_equal(pytorch_state[1].data.numpy(),
augmented_state[1].data.numpy(), decimal=4)
def test_augmented_lstm_works_with_highway_connections(self):
augmented_lstm = AugmentedLstm(10, 11, use_highway=True)
sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor, self.sequence_lengths)
lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)
augmented_lstm(lstm_input)
def test_augmented_lstm_throws_error_on_non_packed_sequence_input(self):
lstm = AugmentedLstm(3, 5)
tensor = torch.rand([5, 7, 9])
with pytest.raises(ConfigurationError):
lstm(tensor)
def test_augmented_lstm_is_initialized_with_correct_biases(self):
lstm = AugmentedLstm(2, 3)
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
# Non-highway case.
lstm = AugmentedLstm(2, 3, use_highway=False)
true_state_bias = numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0])
numpy.testing.assert_array_equal(lstm.state_linearity.bias.data.numpy(), true_state_bias)
|
import os
import re
import string
import sys
import time
import types
OUT_ENCODING = 'utf-8'
version = (
(sys.hexversion & (0xff << 24)) >> 24,
(sys.hexversion & (0xff << 16)) >> 16
)
if version[0] >= 3:
#noinspection PyUnresolvedReferences
import builtins as the_builtins
string = "".__class__
STR_TYPES = (getattr(the_builtins, "bytes"), str)
NUM_TYPES = (int, float)
SIMPLEST_TYPES = NUM_TYPES + STR_TYPES + (None.__class__,)
EASY_TYPES = NUM_TYPES + STR_TYPES + (None.__class__, dict, tuple, list)
def the_exec(source, context):
exec (source, context)
# noinspection PyUnresolvedReferences
from inspect import getfullargspec
else: # < 3.0
import __builtin__ as the_builtins
STR_TYPES = (getattr(the_builtins, "unicode"), str)
NUM_TYPES = (int, long, float)
SIMPLEST_TYPES = NUM_TYPES + STR_TYPES + (types.NoneType,)
EASY_TYPES = NUM_TYPES + STR_TYPES + (types.NoneType, dict, tuple, list)
def the_exec(source, context):
#noinspection PyRedundantParentheses
exec (source) in context
def getfullargspec(func):
import inspect
return inspect.getargspec(func) + ([], None, {})
if version[0] == 2 and version[1] < 4:
HAS_DECORATORS = False
def lstrip(s, prefix):
i = 0
        while i < len(s) and s[i] == prefix:
i += 1
return s[i:]
else:
HAS_DECORATORS = True
lstrip = string.lstrip
# return type inference helper table
INT_LIT = '0'
FLOAT_LIT = '0.0'
DICT_LIT = '{}'
LIST_LIT = '[]'
TUPLE_LIT = '()'
BOOL_LIT = 'False'
RET_TYPE = {# {'type_name': 'value_string'} lookup table
# chaining
"self": "self",
"self.": "self",
# int
"int": INT_LIT,
"Int": INT_LIT,
"integer": INT_LIT,
"Integer": INT_LIT,
"short": INT_LIT,
"long": INT_LIT,
"number": INT_LIT,
"Number": INT_LIT,
# float
"float": FLOAT_LIT,
"Float": FLOAT_LIT,
"double": FLOAT_LIT,
"Double": FLOAT_LIT,
"floating": FLOAT_LIT,
# boolean
"bool": BOOL_LIT,
"boolean": BOOL_LIT,
"Bool": BOOL_LIT,
"Boolean": BOOL_LIT,
"True": BOOL_LIT,
"true": BOOL_LIT,
"False": BOOL_LIT,
"false": BOOL_LIT,
# list
'list': LIST_LIT,
'List': LIST_LIT,
'[]': LIST_LIT,
# tuple
"tuple": TUPLE_LIT,
"sequence": TUPLE_LIT,
"Sequence": TUPLE_LIT,
# dict
"dict": DICT_LIT,
"Dict": DICT_LIT,
"dictionary": DICT_LIT,
"Dictionary": DICT_LIT,
"map": DICT_LIT,
"Map": DICT_LIT,
"hashtable": DICT_LIT,
"Hashtable": DICT_LIT,
"{}": DICT_LIT,
# "objects"
"object": "object()",
}
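# A hedged illustration (the helper name is hypothetical, not part of this
# module) of how the RET_TYPE table is meant to be used: map a type word seen
# in a docstring to a source literal for a fake return value.
def _guess_return_literal(type_word):
    return RET_TYPE.get(type_word)  # None means no sane default is known

assert _guess_return_literal("integer") == INT_LIT  # '0'
assert _guess_return_literal("Hashtable") == DICT_LIT  # '{}'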
if version[0] < 3:
UNICODE_LIT = 'u""'
BYTES_LIT = '""'
RET_TYPE.update({
'string': BYTES_LIT,
'String': BYTES_LIT,
'str': BYTES_LIT,
'Str': BYTES_LIT,
'character': BYTES_LIT,
'char': BYTES_LIT,
'unicode': UNICODE_LIT,
'Unicode': UNICODE_LIT,
'bytes': BYTES_LIT,
'byte': BYTES_LIT,
'Bytes': BYTES_LIT,
'Byte': BYTES_LIT,
})
DEFAULT_STR_LIT = BYTES_LIT
# also, files:
RET_TYPE.update({
'file': "file('/dev/null')",
})
def ensureUnicode(data):
if type(data) == str:
return data.decode(OUT_ENCODING, 'replace')
return unicode(data)
else:
UNICODE_LIT = '""'
BYTES_LIT = 'b""'
RET_TYPE.update({
'string': UNICODE_LIT,
'String': UNICODE_LIT,
'str': UNICODE_LIT,
'Str': UNICODE_LIT,
'character': UNICODE_LIT,
'char': UNICODE_LIT,
'unicode': UNICODE_LIT,
'Unicode': UNICODE_LIT,
'bytes': BYTES_LIT,
'byte': BYTES_LIT,
'Bytes': BYTES_LIT,
'Byte': BYTES_LIT,
})
DEFAULT_STR_LIT = UNICODE_LIT
# also, files: we can't provide an easy expression on py3k
RET_TYPE.update({
'file': None,
})
def ensureUnicode(data):
if type(data) == bytes:
return data.decode(OUT_ENCODING, 'replace')
return str(data)
if version[0] > 2:
import io # in 3.0
def fopen(name, mode):
kwargs = {}
if 'b' not in mode:
kwargs['encoding'] = OUT_ENCODING
return io.open(name, mode, **kwargs)
else:
fopen = open
if sys.platform == 'cli':
#noinspection PyUnresolvedReferences
from System import DateTime
class Timer(object):
def __init__(self):
self.started = DateTime.Now
def elapsed(self):
return (DateTime.Now - self.started).TotalMilliseconds
else:
class Timer(object):
def __init__(self):
self.started = time.time()
def elapsed(self):
return int((time.time() - self.started) * 1000)
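# Sketch: both Timer variants expose the same interface, e.g.
#   t = Timer()
#   ... do work ...
#   print(t.elapsed(), 'ms elapsed')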
IS_JAVA = hasattr(os, "java")
BUILTIN_MOD_NAME = the_builtins.__name__
IDENT_PATTERN = "[A-Za-z_][0-9A-Za-z_]*" # re pattern for identifier
STR_CHAR_PATTERN = "[0-9A-Za-z_.,\+\-&\*% ]"
DOC_FUNC_RE = re.compile("(?:.*\.)?(\w+)\(([^\)]*)\).*") # $1 = function name, $2 = arglist
SANE_REPR_RE = re.compile(IDENT_PATTERN + "(?:\(.*\))?") # identifier with an optional (...) part
IDENT_RE = re.compile("(" + IDENT_PATTERN + ")") # $1 = identifier
STARS_IDENT_RE = re.compile("(\*?\*?" + IDENT_PATTERN + ")") # $1 = identifier, maybe with a * or **
IDENT_EQ_RE = re.compile("(" + IDENT_PATTERN + "\s*=)") # $1 = identifier with a following '='
SIMPLE_VALUE_RE = re.compile(
"(\([+-]?[0-9](?:\s*,\s*[+-]?[0-9])*\))|" + # a numeric tuple, e.g. in pygame
"([+-]?[0-9]+\.?[0-9]*(?:[Ee]?[+-]?[0-9]+\.?[0-9]*)?)|" + # number
"('" + STR_CHAR_PATTERN + "*')|" + # single-quoted string
'("' + STR_CHAR_PATTERN + '*")|' + # double-quoted string
"(\[\])|" +
"(\{\})|" +
"(\(\))|" +
"(True|False|None)"
) # $? = sane default value
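# A quick self-check sketch: these are the shapes of default values the pattern
# above is meant to accept verbatim (numbers, numeric tuples, short quoted
# strings, empty containers, and the three constants).
for _sample in ("42", "-3.5", "'abc'", '"a b"', "(1, 2)", "[]", "{}", "()", "None"):
    assert SIMPLE_VALUE_RE.match(_sample), _sample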
# Some values are known to be of no use in source and need to be suppressed.
# Dict is keyed by module names, with "*" meaning "any module";
# values are lists of names of members whose value must be pruned.
SKIP_VALUE_IN_MODULE = {
"sys": (
"modules", "path_importer_cache", "argv", "builtins",
"last_traceback", "last_type", "last_value", "builtin_module_names",
),
"posix": (
"environ",
),
"nt": (
"environ",
),
"zipimport": (
"_zip_directory_cache",
),
"*": (BUILTIN_MOD_NAME,)
}
# {"module": ("name",..)}: omit the names from the skeleton at all.
OMIT_NAME_IN_MODULE = {}
if version[0] >= 3:
v = OMIT_NAME_IN_MODULE.get(BUILTIN_MOD_NAME, []) + ["True", "False", "None", "__debug__"]
OMIT_NAME_IN_MODULE[BUILTIN_MOD_NAME] = v
if IS_JAVA and version > (2, 4): # in 2.5.1 things are way weird!
OMIT_NAME_IN_MODULE['_codecs'] = ['EncodingMap']
OMIT_NAME_IN_MODULE['_hashlib'] = ['Hash']
ADD_VALUE_IN_MODULE = {
"sys": ("exc_value = Exception()", "exc_traceback=None"), # only present after an exception in current thread
}
# Some values are special and are better represented by hand-crafted constructs.
# Dict is keyed by (module name, member name) and value is the replacement.
REPLACE_MODULE_VALUES = {
("numpy.core.multiarray", "typeinfo"): "{}",
("psycopg2._psycopg", "string_types"): "{}", # badly mangled __eq__ breaks fmtValue
("PyQt5.QtWidgets", "qApp") : "QApplication()", # instead of None
}
if version[0] <= 2:
REPLACE_MODULE_VALUES[(BUILTIN_MOD_NAME, "None")] = "object()"
for std_file in ("stdin", "stdout", "stderr"):
REPLACE_MODULE_VALUES[("sys", std_file)] = "open('')" #
# Some functions and methods of some builtin classes have special signatures.
# {("class", "method"): ("signature_string")}
PREDEFINED_BUILTIN_SIGS = { #TODO: user-skeleton
("type", "__init__"): "(cls, what, bases=None, dict=None)", # two sigs squeezed into one
("object", "__init__"): "(self)",
("object", "__new__"): "(cls, *more)", # only for the sake of parameter names readability
("object", "__subclasshook__"): "(cls, subclass)", # trusting PY-1818 on sig
("int", "__init__"): "(self, x, base=10)", # overrides a fake
("list", "__init__"): "(self, seq=())",
("tuple", "__init__"): "(self, seq=())", # overrides a fake
("set", "__init__"): "(self, seq=())",
("dict", "__init__"): "(self, seq=None, **kwargs)",
("property", "__init__"): "(self, fget=None, fset=None, fdel=None, doc=None)",
# TODO: infer, doc comments have it
("dict", "update"): "(self, E=None, **F)", # docstring nearly lies
(None, "zip"): "(seq1, seq2, *more_seqs)",
(None, "range"): "(start=None, stop=None, step=None)", # suboptimal: allows empty arglist
(None, "filter"): "(function_or_none, sequence)",
(None, "iter"): "(source, sentinel=None)",
(None, "getattr"): "(object, name, default=None)",
('frozenset', "__init__"): "(self, seq=())",
("bytearray", "__init__"): "(self, source=None, encoding=None, errors='strict')",
}
if version[0] < 3:
PREDEFINED_BUILTIN_SIGS[
("unicode", "__init__")] = "(self, string=u'', encoding=None, errors='strict')" # overrides a fake
PREDEFINED_BUILTIN_SIGS[("super", "__init__")] = "(self, type1, type2=None)"
PREDEFINED_BUILTIN_SIGS[
(None, "min")] = "(*args, **kwargs)" # too permissive, but py2.x won't allow a better sig
PREDEFINED_BUILTIN_SIGS[(None, "max")] = "(*args, **kwargs)"
PREDEFINED_BUILTIN_SIGS[("str", "__init__")] = "(self, string='')" # overrides a fake
PREDEFINED_BUILTIN_SIGS[(None, "print")] = "(*args, **kwargs)" # can't do better in 2.x
else:
PREDEFINED_BUILTIN_SIGS[("super", "__init__")] = "(self, type1=None, type2=None)"
PREDEFINED_BUILTIN_SIGS[(None, "min")] = "(*args, key=None)"
PREDEFINED_BUILTIN_SIGS[(None, "max")] = "(*args, key=None)"
PREDEFINED_BUILTIN_SIGS[
(None, "open")] = "(file, mode='r', buffering=None, encoding=None, errors=None, newline=None, closefd=True)"
PREDEFINED_BUILTIN_SIGS[
("str", "__init__")] = "(self, value='', encoding=None, errors='strict')" # overrides a fake
PREDEFINED_BUILTIN_SIGS[("str", "format")] = "(self, *args, **kwargs)"
PREDEFINED_BUILTIN_SIGS[
("bytes", "__init__")] = "(self, value=b'', encoding=None, errors='strict')" # overrides a fake
PREDEFINED_BUILTIN_SIGS[("bytes", "format")] = "(self, *args, **kwargs)"
PREDEFINED_BUILTIN_SIGS[(None, "print")] = "(self, *args, sep=' ', end='\\n', file=None)" # proper signature
if (2, 6) <= version < (3, 0):
PREDEFINED_BUILTIN_SIGS[("unicode", "format")] = "(self, *args, **kwargs)"
PREDEFINED_BUILTIN_SIGS[("str", "format")] = "(self, *args, **kwargs)"
if version == (2, 5):
PREDEFINED_BUILTIN_SIGS[("unicode", "splitlines")] = "(keepends=None)" # a typo in docstring there
if version >= (2, 7):
    PREDEFINED_BUILTIN_SIGS[
        ("enumerate", "__init__")] = "(self, iterable, start=0)"  # docstring omits this completely.
if version < (3, 3):
datetime_mod = "datetime"
else:
datetime_mod = "_datetime"
# NOTE: per-module signature data may be lazily imported
# keyed by (module_name, class_name, method_name). PREDEFINED_BUILTIN_SIGS might be a layer of it.
# value is ("signature", "return_literal")
PREDEFINED_MOD_CLASS_SIGS = { #TODO: user-skeleton
(BUILTIN_MOD_NAME, None, 'divmod'): ("(x, y)", "(0, 0)"),
("binascii", None, "hexlify"): ("(data)", BYTES_LIT),
("binascii", None, "unhexlify"): ("(hexstr)", BYTES_LIT),
("time", None, "ctime"): ("(seconds=None)", DEFAULT_STR_LIT),
("_struct", None, "pack"): ("(fmt, *args)", BYTES_LIT),
("_struct", None, "pack_into"): ("(fmt, buffer, offset, *args)", None),
("_struct", None, "unpack"): ("(fmt, string)", None),
("_struct", None, "unpack_from"): ("(fmt, buffer, offset=0)", None),
("_struct", None, "calcsize"): ("(fmt)", INT_LIT),
("_struct", "Struct", "__init__"): ("(self, fmt)", None),
("_struct", "Struct", "pack"): ("(self, *args)", BYTES_LIT),
("_struct", "Struct", "pack_into"): ("(self, buffer, offset, *args)", None),
("_struct", "Struct", "unpack"): ("(self, string)", None),
("_struct", "Struct", "unpack_from"): ("(self, buffer, offset=0)", None),
(datetime_mod, "date", "__new__"): ("(cls, year=None, month=None, day=None)", None),
(datetime_mod, "date", "fromordinal"): ("(cls, ordinal)", "date(1,1,1)"),
(datetime_mod, "date", "fromtimestamp"): ("(cls, timestamp)", "date(1,1,1)"),
(datetime_mod, "date", "isocalendar"): ("(self)", "(1, 1, 1)"),
(datetime_mod, "date", "isoformat"): ("(self)", DEFAULT_STR_LIT),
(datetime_mod, "date", "isoweekday"): ("(self)", INT_LIT),
(datetime_mod, "date", "replace"): ("(self, year=None, month=None, day=None)", "date(1,1,1)"),
(datetime_mod, "date", "strftime"): ("(self, format)", DEFAULT_STR_LIT),
(datetime_mod, "date", "timetuple"): ("(self)", "(0, 0, 0, 0, 0, 0, 0, 0, 0)"),
(datetime_mod, "date", "today"): ("(self)", "date(1, 1, 1)"),
(datetime_mod, "date", "toordinal"): ("(self)", INT_LIT),
(datetime_mod, "date", "weekday"): ("(self)", INT_LIT),
(datetime_mod, "timedelta", "__new__"
): (
"(cls, days=None, seconds=None, microseconds=None, milliseconds=None, minutes=None, hours=None, weeks=None)",
None),
(datetime_mod, "datetime", "__new__"
): (
"(cls, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=None)",
None),
(datetime_mod, "datetime", "astimezone"): ("(self, tz)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "combine"): ("(cls, date, time)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "date"): ("(self)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "fromtimestamp"): ("(cls, timestamp, tz=None)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "isoformat"): ("(self, sep='T')", DEFAULT_STR_LIT),
(datetime_mod, "datetime", "now"): ("(cls, tz=None)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "strptime"): ("(cls, date_string, format)", DEFAULT_STR_LIT),
(datetime_mod, "datetime", "replace" ):
(
"(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=None)",
"datetime(1, 1, 1)"),
(datetime_mod, "datetime", "time"): ("(self)", "time(0, 0)"),
(datetime_mod, "datetime", "timetuple"): ("(self)", "(0, 0, 0, 0, 0, 0, 0, 0, 0)"),
(datetime_mod, "datetime", "timetz"): ("(self)", "time(0, 0)"),
(datetime_mod, "datetime", "utcfromtimestamp"): ("(self, timestamp)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "utcnow"): ("(cls)", "datetime(1, 1, 1)"),
(datetime_mod, "datetime", "utctimetuple"): ("(self)", "(0, 0, 0, 0, 0, 0, 0, 0, 0)"),
(datetime_mod, "time", "__new__"): (
"(cls, hour=None, minute=None, second=None, microsecond=None, tzinfo=None)", None),
(datetime_mod, "time", "isoformat"): ("(self)", DEFAULT_STR_LIT),
(datetime_mod, "time", "replace"): (
"(self, hour=None, minute=None, second=None, microsecond=None, tzinfo=None)", "time(0, 0)"),
(datetime_mod, "time", "strftime"): ("(self, format)", DEFAULT_STR_LIT),
(datetime_mod, "tzinfo", "dst"): ("(self, date_time)", INT_LIT),
(datetime_mod, "tzinfo", "fromutc"): ("(self, date_time)", "datetime(1, 1, 1)"),
(datetime_mod, "tzinfo", "tzname"): ("(self, date_time)", DEFAULT_STR_LIT),
(datetime_mod, "tzinfo", "utcoffset"): ("(self, date_time)", INT_LIT),
("_io", None, "open"): ("(name, mode=None, buffering=None)", "file('/dev/null')"),
("_io", "FileIO", "read"): ("(self, size=-1)", DEFAULT_STR_LIT),
("_fileio", "_FileIO", "read"): ("(self, size=-1)", DEFAULT_STR_LIT),
("thread", None, "start_new"): ("(function, args, kwargs=None)", INT_LIT),
("_thread", None, "start_new"): ("(function, args, kwargs=None)", INT_LIT),
("itertools", "groupby", "__init__"): ("(self, iterable, key=None)", None),
("itertools", None, "groupby"): ("(iterable, key=None)", LIST_LIT),
("cStringIO", "OutputType", "seek"): ("(self, position, mode=0)", None),
("cStringIO", "InputType", "seek"): ("(self, position, mode=0)", None),
# NOTE: here we stand on shaky ground providing sigs for 3rd-party modules, though well-known
("numpy.core.multiarray", "ndarray", "__array__"): ("(self, dtype=None)", None),
("numpy.core.multiarray", None, "arange"): ("(start=None, stop=None, step=None, dtype=None)", None),
# same as range()
("numpy.core.multiarray", None, "set_numeric_ops"): ("(**ops)", None),
("numpy.random.mtrand", None, "rand"): ("(*dn)", None),
("numpy.random.mtrand", None, "randn"): ("(*dn)", None),
("numpy.core.multiarray", "ndarray", "reshape"): ("(self, shape, *shapes, order='C')", None),
("numpy.core.multiarray", "ndarray", "resize"): ("(self, *new_shape, refcheck=True)", None),
}
bin_collections_names = ['collections', '_collections']
for name in bin_collections_names:
PREDEFINED_MOD_CLASS_SIGS[(name, "deque", "__init__")] = ("(self, iterable=(), maxlen=None)", None)
PREDEFINED_MOD_CLASS_SIGS[(name, "defaultdict", "__init__")] = ("(self, default_factory=None, **kwargs)", None)
if version[0] < 3:
PREDEFINED_MOD_CLASS_SIGS[("exceptions", "BaseException", "__unicode__")] = ("(self)", UNICODE_LIT)
PREDEFINED_MOD_CLASS_SIGS[("itertools", "product", "__init__")] = ("(self, *iterables, **kwargs)", LIST_LIT)
else:
PREDEFINED_MOD_CLASS_SIGS[("itertools", "product", "__init__")] = ("(self, *iterables, repeat=1)", LIST_LIT)
if version[0] < 3:
PREDEFINED_MOD_CLASS_SIGS[("PyQt4.QtCore", None, "pyqtSlot")] = (
"(*types, **keywords)", None) # doc assumes py3k syntax
# known properties of modules
# {{"module": {"class", "property" : ("letters", ("getter", "type"))}},
# where letters is any set of r,w,d (read, write, del) and "getter" is a source of typed getter.
# if value is None, the property should be omitted.
# read-only properties that return an object are not listed.
G_OBJECT = ("lambda self: object()", None)
G_TYPE = ("lambda self: type(object)", "type")
G_DICT = ("lambda self: {}", "dict")
G_STR = ("lambda self: ''", "string")
G_TUPLE = ("lambda self: tuple()", "tuple")
G_FLOAT = ("lambda self: 0.0", "float")
G_INT = ("lambda self: 0", "int")
G_BOOL = ("lambda self: True", "bool")
KNOWN_PROPS = {
BUILTIN_MOD_NAME: {
("object", '__class__'): ('r', G_TYPE),
('complex', 'real'): ('r', G_FLOAT),
('complex', 'imag'): ('r', G_FLOAT),
("file", 'softspace'): ('r', G_BOOL),
("file", 'name'): ('r', G_STR),
("file", 'encoding'): ('r', G_STR),
("file", 'mode'): ('r', G_STR),
("file", 'closed'): ('r', G_BOOL),
("file", 'newlines'): ('r', G_STR),
("slice", 'start'): ('r', G_INT),
("slice", 'step'): ('r', G_INT),
("slice", 'stop'): ('r', G_INT),
("super", '__thisclass__'): ('r', G_TYPE),
("super", '__self__'): ('r', G_TYPE),
("super", '__self_class__'): ('r', G_TYPE),
("type", '__basicsize__'): ('r', G_INT),
("type", '__itemsize__'): ('r', G_INT),
("type", '__base__'): ('r', G_TYPE),
("type", '__flags__'): ('r', G_INT),
("type", '__mro__'): ('r', G_TUPLE),
("type", '__bases__'): ('r', G_TUPLE),
("type", '__dictoffset__'): ('r', G_INT),
("type", '__dict__'): ('r', G_DICT),
("type", '__name__'): ('r', G_STR),
("type", '__weakrefoffset__'): ('r', G_INT),
},
"exceptions": {
("BaseException", '__dict__'): ('r', G_DICT),
("BaseException", 'message'): ('rwd', G_STR),
("BaseException", 'args'): ('r', G_TUPLE),
("EnvironmentError", 'errno'): ('rwd', G_INT),
("EnvironmentError", 'message'): ('rwd', G_STR),
("EnvironmentError", 'strerror'): ('rwd', G_INT),
("EnvironmentError", 'filename'): ('rwd', G_STR),
("SyntaxError", 'text'): ('rwd', G_STR),
("SyntaxError", 'print_file_and_line'): ('rwd', G_BOOL),
("SyntaxError", 'filename'): ('rwd', G_STR),
("SyntaxError", 'lineno'): ('rwd', G_INT),
("SyntaxError", 'offset'): ('rwd', G_INT),
("SyntaxError", 'msg'): ('rwd', G_STR),
("SyntaxError", 'message'): ('rwd', G_STR),
("SystemExit", 'message'): ('rwd', G_STR),
("SystemExit", 'code'): ('rwd', G_OBJECT),
("UnicodeDecodeError", '__basicsize__'): None,
("UnicodeDecodeError", '__itemsize__'): None,
("UnicodeDecodeError", '__base__'): None,
("UnicodeDecodeError", '__flags__'): ('rwd', G_INT),
("UnicodeDecodeError", '__mro__'): None,
("UnicodeDecodeError", '__bases__'): None,
("UnicodeDecodeError", '__dictoffset__'): None,
("UnicodeDecodeError", '__dict__'): None,
("UnicodeDecodeError", '__name__'): None,
("UnicodeDecodeError", '__weakrefoffset__'): None,
("UnicodeEncodeError", 'end'): ('rwd', G_INT),
("UnicodeEncodeError", 'encoding'): ('rwd', G_STR),
("UnicodeEncodeError", 'object'): ('rwd', G_OBJECT),
("UnicodeEncodeError", 'start'): ('rwd', G_INT),
("UnicodeEncodeError", 'reason'): ('rwd', G_STR),
("UnicodeEncodeError", 'message'): ('rwd', G_STR),
("UnicodeTranslateError", 'end'): ('rwd', G_INT),
("UnicodeTranslateError", 'encoding'): ('rwd', G_STR),
("UnicodeTranslateError", 'object'): ('rwd', G_OBJECT),
("UnicodeTranslateError", 'start'): ('rwd', G_INT),
("UnicodeTranslateError", 'reason'): ('rwd', G_STR),
("UnicodeTranslateError", 'message'): ('rwd', G_STR),
},
'_ast': {
("AST", '__dict__'): ('rd', G_DICT),
},
'posix': {
("statvfs_result", 'f_flag'): ('r', G_INT),
("statvfs_result", 'f_bavail'): ('r', G_INT),
("statvfs_result", 'f_favail'): ('r', G_INT),
("statvfs_result", 'f_files'): ('r', G_INT),
("statvfs_result", 'f_frsize'): ('r', G_INT),
("statvfs_result", 'f_blocks'): ('r', G_INT),
("statvfs_result", 'f_ffree'): ('r', G_INT),
("statvfs_result", 'f_bfree'): ('r', G_INT),
("statvfs_result", 'f_namemax'): ('r', G_INT),
("statvfs_result", 'f_bsize'): ('r', G_INT),
("stat_result", 'st_ctime'): ('r', G_INT),
("stat_result", 'st_rdev'): ('r', G_INT),
("stat_result", 'st_mtime'): ('r', G_INT),
("stat_result", 'st_blocks'): ('r', G_INT),
("stat_result", 'st_gid'): ('r', G_INT),
("stat_result", 'st_nlink'): ('r', G_INT),
("stat_result", 'st_ino'): ('r', G_INT),
("stat_result", 'st_blksize'): ('r', G_INT),
("stat_result", 'st_dev'): ('r', G_INT),
("stat_result", 'st_size'): ('r', G_INT),
("stat_result", 'st_mode'): ('r', G_INT),
("stat_result", 'st_uid'): ('r', G_INT),
("stat_result", 'st_atime'): ('r', G_INT),
},
"pwd": {
("struct_pwent", 'pw_dir'): ('r', G_STR),
("struct_pwent", 'pw_gid'): ('r', G_INT),
("struct_pwent", 'pw_passwd'): ('r', G_STR),
("struct_pwent", 'pw_gecos'): ('r', G_STR),
("struct_pwent", 'pw_shell'): ('r', G_STR),
("struct_pwent", 'pw_name'): ('r', G_STR),
("struct_pwent", 'pw_uid'): ('r', G_INT),
("struct_passwd", 'pw_dir'): ('r', G_STR),
("struct_passwd", 'pw_gid'): ('r', G_INT),
("struct_passwd", 'pw_passwd'): ('r', G_STR),
("struct_passwd", 'pw_gecos'): ('r', G_STR),
("struct_passwd", 'pw_shell'): ('r', G_STR),
("struct_passwd", 'pw_name'): ('r', G_STR),
("struct_passwd", 'pw_uid'): ('r', G_INT),
},
"thread": {
("_local", '__dict__'): None
},
"xxsubtype": {
("spamdict", 'state'): ('r', G_INT),
("spamlist", 'state'): ('r', G_INT),
},
"zipimport": {
("zipimporter", 'prefix'): ('r', G_STR),
("zipimporter", 'archive'): ('r', G_STR),
("zipimporter", '_files'): ('r', G_DICT),
},
"_struct": {
("Struct", "size"): ('r', G_INT),
("Struct", "format"): ('r', G_STR),
},
datetime_mod: {
("datetime", "hour"): ('r', G_INT),
("datetime", "minute"): ('r', G_INT),
("datetime", "second"): ('r', G_INT),
("datetime", "microsecond"): ('r', G_INT),
("date", "day"): ('r', G_INT),
("date", "month"): ('r', G_INT),
("date", "year"): ('r', G_INT),
("time", "hour"): ('r', G_INT),
("time", "minute"): ('r', G_INT),
("time", "second"): ('r', G_INT),
("time", "microsecond"): ('r', G_INT),
("timedelta", "days"): ('r', G_INT),
("timedelta", "seconds"): ('r', G_INT),
("timedelta", "microseconds"): ('r', G_INT),
},
}
# Sometimes module X defines item foo but foo.__module__ == 'Y' instead of 'X';
# module Y just re-exports foo, and foo fakes being defined in Y.
# We list all such Ys keyed by X, all fully-qualified names:
# {"real_definer_module": ("fake_reexporter_module",..)}
KNOWN_FAKE_REEXPORTERS = {
"_collections": ('collections',),
"_functools": ('functools',),
"_socket": ('socket',), # .error, etc
"pyexpat": ('xml.parsers.expat',),
"_bsddb": ('bsddb.db',),
"pysqlite2._sqlite": ('pysqlite2.dbapi2',), # errors
"numpy.core.multiarray": ('numpy', 'numpy.core'),
"numpy.core._dotblas": ('numpy', 'numpy.core'),
"numpy.core.umath": ('numpy', 'numpy.core'),
"gtk._gtk": ('gtk', 'gtk.gdk',),
"gobject._gobject": ('gobject',),
"gnomecanvas": ("gnome.canvas",),
}
KNOWN_FAKE_BASES = []
# list of classes that pretend to be base classes but are mere wrappers, and their defining modules
# [(class, module),...] -- real objects, not names
#noinspection PyBroadException
try:
#noinspection PyUnresolvedReferences
import sip as sip_module # Qt specifically likes it
if hasattr(sip_module, 'wrapper'):
KNOWN_FAKE_BASES.append((sip_module.wrapper, sip_module))
if hasattr(sip_module, 'simplewrapper'):
KNOWN_FAKE_BASES.append((sip_module.simplewrapper, sip_module))
del sip_module
except:
pass
# This is a list of builtin classes that need a fake __init__
FAKE_BUILTIN_INITS = (tuple, type, int, str)
if version[0] < 3:
FAKE_BUILTIN_INITS = FAKE_BUILTIN_INITS + (getattr(the_builtins, "unicode"),)
else:
FAKE_BUILTIN_INITS = FAKE_BUILTIN_INITS + (getattr(the_builtins, "str"), getattr(the_builtins, "bytes"))
# Some builtin methods are decorated, but this is hard to detect.
# {("class_name", "method_name"): "decorator"}
KNOWN_DECORATORS = {
("dict", "fromkeys"): "staticmethod",
("object", "__subclasshook__"): "classmethod",
("bytearray", "fromhex"): "classmethod",
("bytes", "fromhex"): "classmethod",
("bytearray", "maketrans"): "staticmethod",
("bytes", "maketrans"): "staticmethod",
("int", "from_bytes"): "classmethod",
("float", "fromhex"): "staticmethod",
}
classobj_txt = ( #TODO: user-skeleton
"class ___Classobj:" "\n"
" '''A mock class representing the old style class base.'''" "\n"
" __module__ = ''" "\n"
" __class__ = None" "\n"
"\n"
" def __init__(self):" "\n"
" pass" "\n"
" __dict__ = {}" "\n"
" __doc__ = ''" "\n"
)
MAC_STDLIB_PATTERN = re.compile("/System/Library/Frameworks/Python\\.framework/Versions/(.+)/lib/python\\1/(.+)")
MAC_SKIP_MODULES = ["test", "ctypes/test", "distutils/tests", "email/test",
"importlib/test", "json/tests", "lib2to3/tests",
"bsddb/test",
"sqlite3/test", "tkinter/test", "idlelib", "antigravity"]
POSIX_SKIP_MODULES = ["vtemodule", "PAMmodule", "_snackmodule", "/quodlibet/_mmkeys"]
BIN_MODULE_FNAME_PAT = re.compile(r'([a-zA-Z_][0-9a-zA-Z_]*)\.(?:pyc|pyo|(?:(?:[a-zA-Z_0-9\-]+\.)?(?:so|pyd)))$')
# possible binary module filename: a letter then alphanumerics, optionally with an ABI/platform tag per PEP-3149
TYPELIB_MODULE_FNAME_PAT = re.compile("([a-zA-Z_]+[0-9a-zA-Z]*)[0-9a-zA-Z-.]*\\.typelib")
MODULES_INSPECT_DIR = ['gi.repository']
TENSORFLOW_CONTRIB_OPS_MODULE_PATTERN = re.compile(r'tensorflow\.contrib\.(?:.+)\.(?:python\.ops\.|_dataset_ops$)')
CLASS_ATTR_BLACKLIST = [
'google.protobuf.pyext._message.Message._extensions_by_name',
'google.protobuf.pyext._message.Message._extensions_by_number',
'panda3d.core.ExecutionEnvironment.environment_variables',
]
SKELETON_HEADER_VERSION_LINE = re.compile(r'# by generator (?P<version>\d+\.\d+)')
SKELETON_HEADER_ORIGIN_LINE = re.compile(r'# from (?P<path>.*)')
REQUIRED_GEN_VERSION_LINE = re.compile(r'(?P<name>\S+)\s+(?P<version>\d+\.\d+)')
# "mod_path" and "mod_mtime" markers are used in tests
BLACKLIST_VERSION_LINE = re.compile(r'(?P<path>{mod_path}|[^=]+) = (?P<version>\d+\.\d+) (?P<mtime>{mod_mtime}|\d+)')
ENV_TEST_MODE_FLAG = 'GENERATOR3_TEST_MODE'
ENV_PREGENERATION_MODE_FLAG = "IS_PREGENERATED_SKELETONS"
ENV_VERSION = 'GENERATOR3_VERSION'
ENV_REQUIRED_GEN_VERSION_FILE = 'GENERATOR3_REQUIRED_GEN_VERSION_FILE'
FAILED_VERSION_STAMP_PREFIX = '.failed__'
CACHE_DIR_NAME = 'cache'
STATE_FILE_NAME = '.state.json'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import json
from utils import *
import logging
logging.basicConfig(filename='log',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
def histogram(df, bounds, plot_path, stats_path):
"""Plot a histogram of a binned data with defined bin boundaries.
Args:
df : Pandas.DataFrame
Input dataframe with index of datetime object and each column is a
bin od data
bounds : list of floats
List of bins boundaries.
path : str
Path for saved plots
"""
# Initialise new series
columns = ["lower", "upper", "width", "midpoint",
"loglower", "logupper", "logwidth", "logmidpoint",
'Counts', 'Cum Counts', 'Density', "dN/logD"]
df1 = pd.DataFrame(columns=columns)
    # Take the mean of the input dataframe, which yields a series indexed by column
df1['Counts'] = df.mean(axis=0)
totalCounts = df1['Counts'].sum()
df1["lower"] = bounds[:-1]
df1["upper"] = bounds[1:]
df1["width"] = df1["upper"] - df1["lower"]
df1["midpoint"] = df1["width"]/2 + df1["lower"]
df1["loglower"] = np.log10(df1["lower"])
df1["logupper"] = np.log10(df1["upper"])
df1["logwidth"] = df1["logupper"] - df1["loglower"]
df1["logmidpoint"] = np.log10(df1["midpoint"])
df1["Density"] = df1["Counts"] / df1["width"]
df1["dN/logD"] = df1["Counts"] / df1["logwidth"]
# Iterate through the bins
bins = df1.index
cumCounts = 0
for ix, key in enumerate(bins):
counts = df1['Counts'].iloc[ix]
lower = df1['lower'].iloc[ix]
width = df1["width"].iloc[ix]
# Cumulative frequency
lowerCumCounts = cumCounts
cumCounts += counts
upperCumCounts = cumCounts
        df1.loc[key, 'Cum Counts'] = cumCounts
# Median
if lowerCumCounts < totalCounts/2 < upperCumCounts:
median = lower + ((totalCounts/2 - lowerCumCounts)/counts) * width
# Statistics
counts = df1['Counts'].values
midpoints = df1["midpoint"].values
# logmidpoints = np.log10(midpoints)
# Normal distribution
mean, std, lower, upper = statistics(midpoints, counts)
# Log normal distribution
gm, gstd, glower, gupper = np.exp(statistics(np.log(midpoints), counts))
    # Sometimes a median is not found and so needs to be excluded from the display
if 'median' in locals():
index = ['Median',
'Mean Diameter', 'Std', '95% lower', '95% upper',
'Geometric mean diameter', 'Geometric standard deviation',
'Geometric 95% lower', 'Geometric 95% upper']
        statsdata = [median,
                     mean, std, lower, upper,
                     gm, gstd, glower, gupper]
else:
index = ['Mean Diameter', 'Std', '95% lower', '95% upper',
'Geometric mean diameter', 'Geometric standard deviation',
'Geometric 95% lower', 'Geometric 95% upper']
        statsdata = [mean, std, lower, upper,
                     gm, gstd, glower, gupper]
statsdf = pd.DataFrame(statsdata, index=index)
columns = ['Counts', 'Cum Counts', 'Density', 'dN/logD']
if stats_path is not None:
save_latex(statsdf, stats_path, "histstats", header=False)
save_latex(df1[columns], stats_path, "histdata")
if plot_path is not None:
# Plot lognormal
x1 = df1["lower"].tolist() # left edge
x2 = df1["upper"].tolist() # right edge
w = np.array(x2)-np.array(x1) # variable width
y = df1["dN/logD"].tolist()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(x1, y, width=w)
ax.set_xscale('log')
plt.ylabel(r'Frequency per $\log \mu$')
plt.xlabel(r'$\log \mathbf{Diameter}$ / $\mu$')
saveplot(plot_path, fig)
plt.close()
if __name__ == '__main__':
# Get filenames to work with
parser = argparse.ArgumentParser(description="")
parser.add_argument("data", help="Date file")
parser.add_argument("-p", "--plot",
help="Path to plot")
parser.add_argument("-s", "--stats",
help="Path to statistics")
parser.add_argument("-f", "--figsize",
help="Figure size")
options = parser.parse_args()
data_path = options.data
plot_path = options.plot
stats_path = options.stats
fig_size = options.figsize
msg = "Individual histogram of %s" % (data_path)
logging.debug(msg)
debug = "Data: %s, Plot: %s, Stats: %s" % (data_path, plot_path, stats_path)
logging.debug(debug)
# Set figure size
if fig_size is not None:
params = {"figure.figsize": figsize(float(fig_size))}
else:
params = {"figure.figsize": figsize(0.49)}
    plt.rcParams.update(params)
# load data
df = load_data(data_path)
# Fetch bin boundaries
# Get path to settings json
paths = os.path.normpath(data_path).split(os.path.sep)
sensor = os.path.splitext(paths[-1])[0]
path = os.path.sep.join(paths[:-3])
filename = paths[-3] + ".json"
path = os.path.join(path, filename)
# Load setting file
with open(path) as handle:
settings = json.load(handle)
# Grab bin boundaries of a sensor
bounds = settings['sensors'][sensor]['bins']
    if plot_path is not None or stats_path is not None:
        # Plot histogram and/or save statistics
        histogram(df, bounds, plot_path, stats_path)
    else:
        raise ValueError("path not given for --plot or --stats")
|
import numpy as np
from schedgym.sched_env import SchedEnv
DATA_PATH = 'vmagent/data/Huawei-East-1.csv'
if __name__ == "__main__":
env = SchedEnv(5, 40, 90, DATA_PATH, render_path='../test.p',
allow_release=False, double_thr=32)
MAX_STEP = 1e4
env.reset(np.random.randint(0, MAX_STEP))
done = env.termination()
while not done:
feat = env.get_attr('req')
obs = env.get_attr('obs')
        # sample a random action uniformly over the available hosts
avail = env.get_attr('avail')
action = np.random.choice(np.where(avail == 1)[0])
action, next_obs, reward, done = env.step(action)
|
import mysql.connector
import json
import re
import pandas as pd
import pymysql
from sqlalchemy import create_engine
import sys
# Adding the path of self-def Library
sys.path.append("C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Library/script/")
from featureCollection import Feature, Vertex
from myio import read_excel
from mysqlCoon import MY_ENGINE
def MYSQL_DUMP_NEXT():
# ====== Connection ====== #
# Connecting to mysql by providing a sqlachemy engine
engine = MY_ENGINE()
# dump dist excel to mysql
    floorNumber = 9
    for i in range(floorNumber):
fileName = "C:\\Users\\A02wxy\\Documents\\GitHub\\WayFinder\\Direction\\Route\\next\\sf" + str(i + 1) + "f_next.xlsx"
tableName = "sf" + str(i + 1) + "f_next"
df = read_excel(fileName)
        df.to_sql(name=tableName, if_exists="replace", con=engine, index=False)
MYSQL_DUMP_NEXT()
|
class Winding:
def __init__(self,type,voltage,current,taps=None,fill=True):
self.type = type
self.voltage = voltage
self.current = current
self.taps = taps
self.fillLast = fill
self.va = voltage*current
self.turns = 0.0
self.layers = 0.0
self.turnsPerLayer = 0.0
self.meanPathLength = 0.0
self.wireLength = 0.0 # feet
self.wireDiameter = 0.0
self.resistance = 0.0
self.voltageDrop = 0.0
self.weight = 0.0
self.height = 0.0
self.wire = None
self.vout = 0.0
self.voutRMS = 0.0
self.voutNoLoad = 0.0
self.voutRegulation = 0.0
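# A hedged usage sketch (values are illustrative): a 120 V, 2 A secondary.
# The only derived quantity computed in __init__ is the VA rating; the rest
# are placeholders to be filled in by a later design step.
secondary = Winding(type='secondary', voltage=120.0, current=2.0)
assert secondary.va == 240.0  # volt-amperes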
|
# Set the absolute path in the LAB_PATH environment variable before running
import numpy as np
import pandas as pd
#from matplotlib import pyplot as plt
import os
import csv
if not os.getenv("LAB_PATH"):
print("Set Lab Path\n")
exit(1)
benchmarks = ['510.parest_r','541.leela_r','641.leela_s','531.deepsjeng_r','631.deepsjeng_s','505.mcf_r','605.mcf_s','523.xalancbmk_r', '623.xalancbmk_s']
# for filename in os.listdir('/data/benchmarks/spec2017/benchspec/CPU'):
# exec_name = '/data/benchmarks/spec2017/benchspec/CPU/'+filename+'/exe/'+filename.split(".")[1]+"_base.mytest-m64"
# if os.path.isfile(exec_name) and str(filename)[0].isdigit():
# benchmarks.append(filename)
benchmarks.sort()
datadir = os.getenv("LAB_PATH") + '/results-proj-stride/X86/run_micro'
failed_benchmarks = set()
def gem5GetStat(filename, stat):
filename = os.path.join(datadir, '', filename, 'stats.txt').replace('\\','/')
with open(filename) as f:
r = f.read()
print(filename)
if len(r) < 10:
return 0.0
if (r.find(stat) != -1) :
start = r.find(stat) + len(stat) + 1
end = r.find('#', start)
return float(r[start:end])
else:
return float(0.0)
all_arch = ['X86']
plt_arch = ['X86']
all_mem_ctls = ['DDR3_1600_8x8', 'DDR3_2133_8x8', 'LPDDR2_S4_1066_1x32', 'HBM_1000_4H_1x64']
plt_mem_ctls = ['DDR3_1600_8x8']
all_gem5_cpus = ['Simple','DefaultO3','Minor4']
plt_gem5_cpus = ['Simple','DefaultO3']
rows = []
for bm in benchmarks:
for cpu in plt_gem5_cpus:
for mem in plt_mem_ctls:
rows.append([bm,cpu,mem, gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem, 'sim_ticks')/333,
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem, 'sim_insts'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem, 'sim_ops'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem, 'sim_ticks')/1e9,
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem, 'host_op_rate'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.mem_ctrl.dram.avgMemAccLat'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.mem_ctrl.dram.busUtil'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.mem_ctrl.dram.bw_total::total'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.mem_ctrl.dram.totBusLat'),
#memory with store
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.mem_ctrl.dram.avgWrBW'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.cpu.l1d.tags.occ_percent::.cpu.data'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.cpu.l1d.tags.occ_percent::.cpu.l1d.prefetcher'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.cpu.l1d.tags.occ_percent::total'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.cpu.l1d.prefetcher.pfIdentified'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.cpu.l1d.prefetcher.pfBufferHit'),
gem5GetStat(datadir+"/"+bm+"/"+cpu+"/"+mem,'system.cpu.l1d.prefetcher.pfInCache')
])
df = pd.DataFrame(rows, columns=['benchmark','cpu', 'mem', 'cycles','instructions', 'Ops', 'Ticks','Host', 'avgmemaccesslatency','busutilit','bandwidthtotal','totalbuslatency', 'averagewritebandwidth', 'occ_data', 'occ_pf', 'occ_total', 'pfIdentified', 'pfBufferHit','pfInCache'])
df['ipc'] = df['instructions']/df['cycles']
df['cpi']= 1/df['ipc']
print(df)
stats = 'occ_data', 'occ_pf', 'occ_total', 'pfIdentified', 'pfBufferHit','pfInCache'
for stat in stats:
savename = stat+'_l1d_stride.csv'
with open(savename, 'w', newline='') as csvfile:
header = ['benchmark']
writer = csv.writer(csvfile)
for cpu in plt_gem5_cpus:
for mem in plt_mem_ctls:
header.append(mem + "-" + cpu)
writer.writerow(header)
for bm in benchmarks:
row = [bm]
for cpu in plt_gem5_cpus:
for mem in plt_mem_ctls:
d = df[(df['mem']==mem) & (df['benchmark']==bm) & (df['cpu']==cpu)]
row.append(d[stat].iloc[0])
writer.writerow(row)
|
import os
from mortar_rdb.testing import register_session, TestingBase
from mortar_rdb import get_session, declarative_base
from mortar_rdb.controlled import Config, Source
from testfixtures.components import TestComponents
from mock import Mock
from sqlalchemy.pool import StaticPool
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.declarative import declarative_base as sa_declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from testfixtures import Replacer, compare, TempDirectory, OutputCapture
from unittest import TestCase
class TestRegisterSessionFunctional(TestCase):
def setUp(self):
self.dir = TempDirectory()
self.components = TestComponents()
def tearDown(self):
self.components.uninstall()
self.dir.cleanup()
def test_functional(self):
Base = sa_declarative_base()
class Model(Base):
__tablename__ = 'model'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
register_session(
transactional=False,
config=Config(Source(Model.__table__)))
session = get_session()
session.add(Model(name='foo'))
session.commit()
def test_functional_metadata(self):
Base = sa_declarative_base()
class Model(Base):
__tablename__ = 'model'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
register_session(
transactional=False,
metadata=Base.metadata
)
session = get_session()
session.add(Model(name='foo'))
session.commit()
def test_functional_echo_but_no_url(self):
with Replacer() as r:
# make sure there's no DB_URL
r.replace('os.environ', dict())
# hoover up the logging ;-)
with OutputCapture():
register_session(echo=True)
def test_tricky_to_delete(self):
        # respect any DB_URL set here so that
        # we use the real db to make sure
        # delete works across all our DB types...
db_path = (
os.environ.get('DB_URL', '').strip() or
'sqlite:///'+os.path.join(self.dir.path, 'test.db')
)
Base = sa_declarative_base()
class Model1(Base):
__tablename__ = 'model1'
id = Column(Integer, primary_key=True)
model2_id = Column(Integer, ForeignKey('model2.id'))
model2 = relationship("Model2")
class Model2(Base):
__tablename__ = 'model2'
id = Column('id', Integer, primary_key=True)
# create in one session
register_session(db_path,
name='create',
transactional=False,
metadata=Base.metadata)
m1 = Model1()
m2 = Model2()
m1.model2 = m2
session = get_session('create')
if db_path.startswith('sqlite:'):
session.execute('PRAGMA foreign_keys = ON')
session.add(m1)
session.add(m2)
session.commit()
compare(session.query(Model1).count(), 1)
compare(session.query(Model2).count(), 1)
session.rollback()
# now register another session which should
# blow the above away
register_session(db_path,name='read',
transactional=False,
metadata=Base.metadata)
session = get_session('read')
compare(session.query(Model1).count(), 0)
compare(session.query(Model2).count(), 0)
session.rollback()
def test_only_some_packages(self):
Base = sa_declarative_base()
class Model1(Base):
__tablename__ = 'model1'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
class Model2(Base):
__tablename__ = 'model2'
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
register_session(
transactional=False,
config=Config(Source(Model1.__table__)))
# only table1 should have been created!
compare(
[u'model1'],
Inspector.from_engine(get_session().bind).get_table_names()
)
class TestRegisterSessionCalls(TestCase):
def setUp(self):
self.components = TestComponents()
self.r = Replacer()
self.m = Mock()
self.r.replace('mortar_rdb.testing.real_register_session',
self.m.realRegisterSession)
self.r.replace('mortar_rdb.testing.create_engine',
self.m.create_engine)
# mock out for certainty
# self.r.replace('mortar_rdb.testing.???',Mock())
# mock out for table destruction
get_session = Mock()
bind = get_session.return_value.bind
bind.dialect.inspector.return_value = inspector = Mock()
inspector.get_table_names.return_value = ()
self.r.replace('mortar_rdb.testing.get_session', get_session)
def tearDown(self):
self.r.restore()
self.components.uninstall()
def test_default_params(self):
# ie: no DB_URL!
self.r.replace('os.environ',dict())
register_session()
compare([
('create_engine',
('sqlite://',),
{'poolclass': StaticPool,
'echo': False}),
('realRegisterSession',
(None, u'', self.m.create_engine.return_value, False, True, True, None), {}),
],self.m.method_calls)
def test_specified_params(self):
register_session(
url='x://',
name='foo',
echo=True,
transactional=False,
scoped=False,
)
compare([
('realRegisterSession',
('x://', u'foo', None, True, False, False, None), {}),
],self.m.method_calls)
def test_echo_but_no_url(self):
        # make sure there's no DB_URL
self.r.replace('os.environ',dict())
register_session(echo=True)
compare([
('create_engine',
('sqlite://',),
{'poolclass': StaticPool,
'echo': True}),
('realRegisterSession',
(None, u'', self.m.create_engine.return_value, False, True, True, None), {}),
],self.m.method_calls)
def test_engine_passed(self):
engine = object()
register_session(
engine=engine,
)
compare([
('realRegisterSession',
(None, u'', engine, False, True, True, None), {}),
],self.m.method_calls)
def test_url_from_environment(self):
self.r.replace('os.environ',dict(
DB_URL = 'x://'
))
register_session()
compare([
('realRegisterSession',
('x://', u'', None, False, True, True, None), {}),
],self.m.method_calls)
def test_empty_environment_url(self):
self.r.replace('os.environ',dict(
DB_URL = ''
))
register_session()
compare([
('create_engine',
('sqlite://',),
{'poolclass': StaticPool,
'echo': False}),
('realRegisterSession',
('', u'', self.m.create_engine.return_value, False, True, True, None), {}),
],self.m.method_calls)
def test_engine_overrides_environment(self):
self.r.replace('os.environ',dict(
DB_URL = 'x://'
))
engine = object()
register_session(engine=engine)
compare([
('realRegisterSession',
(None, u'', engine, False, True, True, None), {}),
],self.m.method_calls)
class TestTestingBase(TestCase):
def test_manual(self):
b1 = declarative_base()
tb = TestingBase()
b2 = declarative_base()
tb.restore()
b3 = declarative_base()
# checks
self.assertFalse(b1 is b2)
self.assertFalse(b3 is b2)
self.assertTrue(b1 is b3)
def test_context_manager(self):
b1 = declarative_base()
with TestingBase():
b2 = declarative_base()
b3 = declarative_base()
# checks
self.assertFalse(b1 is b2)
self.assertFalse(b3 is b2)
self.assertTrue(b1 is b3)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class NetworkVirtualAppliancesOperations(object):
"""NetworkVirtualAppliancesOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-07-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-07-01"
self.config = config
def _delete_initial(
self, resource_group_name, network_virtual_appliance_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_virtual_appliance_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual
Appliance.
:type network_virtual_appliance_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
        if polling is True:
            polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False:
            polling_method = NoPolling()
        else:
            polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}
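    # A hedged usage sketch (client construction elided; names are illustrative):
    #   poller = network_client.network_virtual_appliances.delete('my-rg', 'my-nva')
    #   poller.wait()  # block until the long-running delete finishes
    #   assert poller.done()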
def get(
self, resource_group_name, network_virtual_appliance_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual
Appliance.
:type network_virtual_appliance_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: NetworkVirtualAppliance or ClientRawResponse if raw=True
:rtype: ~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}
def update_tags(
self, resource_group_name, network_virtual_appliance_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Updates a Network Virtual Appliance.
:param resource_group_name: The resource group name of Network Virtual
Appliance.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual
Appliance being updated.
:type network_virtual_appliance_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: NetworkVirtualAppliance or ClientRawResponse if raw=True
:rtype: ~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}
def _create_or_update_initial(
self, resource_group_name, network_virtual_appliance_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkVirtualAppliance')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkVirtualAppliance', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_virtual_appliance_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual
Appliance.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to the create or update Network
Virtual Appliance.
:type parameters:
~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns NetworkVirtualAppliance
or ClientRawResponse<NetworkVirtualAppliance> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('NetworkVirtualAppliance', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all Network Virtual Appliances in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkVirtualAppliance
:rtype:
~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliancePaged[~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.NetworkVirtualAppliancePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all Network Virtual Appliances in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkVirtualAppliance
:rtype:
~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliancePaged[~azure.mgmt.network.v2020_07_01.models.NetworkVirtualAppliance]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.NetworkVirtualAppliancePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualAppliances'}
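# Usage sketch (not part of the generated client): driving the long-running
# operations above through the parent management client. The client class name,
# credential setup, and all resource names below are assumptions for
# illustration only.
#
#     from azure.common.credentials import ServicePrincipalCredentials
#     from azure.mgmt.network import NetworkManagementClient
#
#     credentials = ServicePrincipalCredentials(client_id="...", secret="...", tenant="...")
#     client = NetworkManagementClient(credentials, "<subscription-id>")
#     poller = client.network_virtual_appliances.create_or_update(
#         "my-rg", "my-nva", parameters)   # returns an LROPoller
#     appliance = poller.result()          # blocks until ARM reports completion
#     for nva in client.network_virtual_appliances.list_by_resource_group("my-rg"):
#         print(nva.name)                  # paged iterator defined above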
|
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2688w!3waxtb=(&w=gko1ko0s!(15yk(bz2vvv-lh8filq%x*#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
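# A minimal sketch (not part of the original project) of how the insecure
# development defaults above are commonly overridden in production; the
# environment variable names are assumptions.
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#     DEBUG = os.environ.get('DJANGO_DEBUG', '').lower() == 'true'
#     ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')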
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from HelloWorld import views
urlpatterns = [
path('admin/', admin.site.urls),
path('HelloWorld/', include('HelloWorld.urls')),
path('common/', include('common.urls')),
    path('', views.index, name='index'), # the path that matches '/'
]
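# A hypothetical sketch (not from this project) of the HelloWorld/urls.py module
# that the include() above expects to exist:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#     ]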
|
import inspect
import os
import sys
import time
from functools import partial
import dill as pkl
from ..dtaiexperimenter import Function, Process
from ..io import dump_object
from .utils import extract_source_of_function
from ..execs import (
DTAIExperimenterFunctionExecutor,
DTAIExperimenterProcessExecutor,
NativeExecutor,
ShellExecutor,
DTAIExperimenterShellExecutor,
)
class Flow:
"""A workflow object.
    A flow is an abstraction of 'something' that needs to be executed.
A good workflow abstracts away a lot of boilerplate regarding
- config
- io
- execution
    For the executors, we introduce the following terminology:
        `local`: Execute the flow in the current python process
        `shell`: Execute the flow in a shell
        `now`: Stderr and stdout go to their defaults
        `log`: Stderr and stdout are collected in a logfile.
        `command`: Build the shell command instead of executing it.
    """
executors = dict(
local_now=NativeExecutor,
shell_now=ShellExecutor,
local_log=DTAIExperimenterFunctionExecutor,
shell_log=DTAIExperimenterProcessExecutor,
shell_log_autonomous=DTAIExperimenterShellExecutor,
)
executioners = executors
def __init__(
self,
config=None,
flow=None,
imports=None,
log_filepath="logfile",
flow_filepath="flowfile.pkl",
timeout_s=60,
):
# Basics
self.log_filepath = log_filepath
self.flow_filepath = flow_filepath
self.timeout_s = timeout_s
self.dumped = False
self.config = config
        # If you provide a python function directly, affe handles that.
if flow is not None:
self.flow = flow
if imports is not None:
self.imports = imports
return
# All things execution
def execute(self):
e = self.executors.get("local_now")(self)
return e.execute()
def run(self):
return self.execute()
def run_via_shell(self, **kwargs):
e = self.executors.get("shell_now")(self, **kwargs)
return e.execute()
def run_with_log(self, **kwargs):
e = self.executors.get("local_log")(self, **kwargs)
return e.execute()
def run_with_log_via_shell(self, return_log_filepath=True, **kwargs):
e = self.executors.get("shell_log")(self, **kwargs)
return e.execute(return_log_filepath=return_log_filepath)
def run_with_log_via_shell_autonomous(self, return_log_filepath=True, **kwargs):
e = self.executors.get("shell_log_autonomous")(self, **kwargs)
return e.execute(return_log_filepath=return_log_filepath)
def run_via_shell_with_log(self, return_log_filepath=True, **kwargs):
"""Synonym to method above."""
return self.run_with_log_via_shell(
return_log_filepath=return_log_filepath, **kwargs
)
def run_via_shell_with_log_autonomous(self, return_log_filepath=True, **kwargs):
"""Synonym to method above."""
return self.run_with_log_via_shell_autonomous(
return_log_filepath=return_log_filepath, **kwargs
)
def get_shell_command(self, **kwargs):
e = self.executors.get("shell_now")(self, **kwargs)
return e.command
def get_shell_with_log_command(self, **kwargs):
# The other shell_log command goes via Process of DTAIExperimenter, this one generates a standalone bash command.
e = self.executors.get("shell_log_autonomous")(self, **kwargs)
return e.command
# Persistence (Load + Dump)
@classmethod
def load(cls, fn):
with open(fn, "rb") as f:
flow = pkl.load(f)
return flow
def dump(self, flow_filepath=None):
if flow_filepath is not None:
self.flow_filepath = flow_filepath
dump_object(self, self.flow_filepath)
self.dumped = True
return
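# Usage sketch (an assumption about typical use, not from the original module):
# wrapping a plain function in a Flow and running it with the `local_now`
# executor. The function body and config values are hypothetical.
#
#     def my_flow(config):
#         print("running with", config)
#
#     f = Flow(config={"seed": 0}, flow=my_flow, timeout_s=10)
#     f.run()                        # same as f.execute()
#     cmd = f.get_shell_command()    # inspect the shell command without running it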
|
from os.path import expanduser
import re
import operator
from functools import reduce       # reduce is not a builtin in Python 3
from itertools import zip_longest  # Python 3 replacement for the map(None, ...) idiom
from io import StringIO
import math
import os
import sys
if sys.version_info[0] < 3:
    import ConfigParser as configparser
else:
    import configparser
DEBUG = 0
def yes_or_no(title):
menu = {1:['Yes',True],2:['No',False]}
print("\n")
print('#' * len(title))
print(str(title))
    print('#' * len(title))
for key in sorted(menu):
print(str(key) + ":" + menu[key][0])
pattern = r'^[0-9]+$'
while True:
ans = input("Make A Choice: [ENTER]")
if re.match(pattern, ans) is not None:
if int(ans) in menu:
answer = menu[int(ans)][1]
break
return answer
def single_question(title):
print("\n")
print('#########################################')
print('## '+str(title))
print('#########################################')
    while True:
        ans = input("[ENTER]")
        if ans:  # input() never returns None, so require a non-empty answer
            answer = ans
            break
    return answer
def multiple_choice(title, options):  # renamed from `list` to avoid shadowing the builtin
    counter = 1
    menu = {}
    for option in options:
        menu[counter] = str(option)
        counter += 1
print("\n")
print('#########################################')
print('## '+str(title)+' ##')
print('#########################################')
for key in sorted(menu):
print(str(key) + ":" + menu[key])
pattern = r'^[0-9]+$'
while True:
ans = input("Make A Choice: [ENTER]")
if re.match(pattern, ans) is not None:
if int(ans) in menu:
answer = menu[int(ans)]
break
return answer
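# Interactive usage sketch for the menu helpers above (values are hypothetical);
# each helper blocks on input() until a valid answer is given:
#
#     if yes_or_no("Proceed with deployment?"):
#         env = multiple_choice("Pick an environment", ["dev", "stage", "prod"])
#         name = single_question("Enter a deployment name")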
def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
separateRows=False, prefix='', postfix='', wrapfunc=lambda x:x):
"""Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function."""
    # closure for breaking logical rows into physical rows, using wrapfunc
    def rowWrapper(row):
        newRows = [wrapfunc(item).split('\n') for item in row]
        # zip_longest replaces the Python 2 idiom map(None, *newRows)
        return [[substr or '' for substr in item]
                for item in zip_longest(*newRows, fillvalue='')]
    # break each logical row into one or more physical ones
    logicalRows = [rowWrapper(row) for row in rows]
    # columns of physical rows
    columns = list(zip_longest(*reduce(operator.add, logicalRows), fillvalue=''))
    # get the maximum of each column by the string length of its items
    maxWidths = [max([len(str(item)) for item in column]) for column in columns]
    rowSeparator = headerChar * (len(prefix) + len(postfix) + sum(maxWidths) +
                                 len(delim) * (len(maxWidths) - 1))
    # select the appropriate justify method
    justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()]
    output = StringIO()
    if separateRows:
        print(rowSeparator, file=output)
    for physicalRows in logicalRows:
        for row in physicalRows:
            print(prefix
                  + delim.join([justify(str(item), width)
                                for (item, width) in zip(row, maxWidths)])
                  + postfix, file=output)
        if separateRows or hasHeader:
            print(rowSeparator, file=output)
            hasHeader = False
    return output.getvalue()
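# Example (illustrative only): rendering a small table with the helper above.
#
#     rows = [["name", "qty"], ["apples", "3"], ["pears", "12"]]
#     print(indent(rows, hasHeader=True))
#     # name   | qty
#     # ------------
#     # apples | 3
#     # pears  | 12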
# written by Mike Brown
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061
def wrap_onspace(text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n).
"""
if type(text) is str:
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line[line.rfind('\n')+1:])
+ len(word.split('\n',1)[0]
) >= width)],
word),
text.split(' ')
)
elif type(text) is list:
new_text = ''
counter = 0
for e in text:
counter += 1
new_text += '('+str(counter)+') '+str(e)+"\n"
#new_text = ''.join(str(e) for e in text)
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line[line.rfind('\n')+1:])
+ len(word.split('\n',1)[0]
) >= width)],
word),
new_text.split(' ')
)
def wrap_onspace_strict(text, width):
"""Similar to wrap_onspace, but enforces the width constraint:
words longer than width are split."""
wordRegex = re.compile(r'\S{'+str(width)+r',}')
if type(text) is str:
return wrap_onspace(wordRegex.sub(lambda m: wrap_always(m.group(),width),text),width)
elif type(text) is list:
new_text = ''
counter = 0
for e in text:
counter += 1
new_text += '('+str(counter)+') '+str(e)+"\n"
#new_text = ''.join(str(e) for e in text)
return wrap_onspace(wordRegex.sub(lambda m: wrap_always(m.group(),width),new_text),width)
def wrap_always(text, width):
"""A simple word-wrap function that wraps text on exactly width characters.
It doesn't split the text in words."""
if type(text) is str:
        return '\n'.join([text[width*i:width*(i+1)] for i in range(int(math.ceil(1.*len(text)/width)))])
elif type(text) is list:
new_text = ''
counter = 0
for e in text:
counter += 1
new_text += '('+str(counter)+') '+str(e)+"\n"
#new_text = ''.join(str(e) for e in text)
        return '\n'.join([new_text[width*i:width*(i+1)] for i in range(int(math.ceil(1.*len(new_text)/width)))])
def select_path(title, search_path):
DIRS = []
for child in os.listdir(search_path):
path = os.path.join(search_path, child)
if os.path.isdir(path):
DIRS.append(path)
MENU = {}
UNSORTED_MENU = {}
COUNTER = 0
    for d in DIRS:
        entry = [d]  # renamed from `list` to avoid shadowing the builtin
        matchObj = re.match(r'.*[!/]([a-zA-Z0-9-_]+)$', d, re.M | re.I)
        if matchObj:
            entry.append(matchObj.group(1))
            UNSORTED_MENU[matchObj.group(1)] = entry
for key in sorted(UNSORTED_MENU):
COUNTER += 1
MENU[COUNTER] = UNSORTED_MENU[key]
print("\n")
print("####################################")
print('## '+str(title)+' ##')
print("####################################")
for key in sorted(MENU):
print(str(key) + ": " + MENU[key][1])
    while True:
        USER_RESPONSE = input("Make a selection [ENTER] (Ctrl-C to exit):")
        if USER_RESPONSE.isdigit() and int(USER_RESPONSE) in MENU:
            directory = MENU[int(USER_RESPONSE)][0]
            break
return directory
def read_ini_file_to_dictionary(filename):
parser = configparser.ConfigParser()
parser.optionxform = str
parser.read(filename)
confdict = {section: dict(parser.items(section)) for section in parser.sections()}
return confdict
def auth_config():
home = expanduser("~")
inifile = str(home)+'/.auth0_client/auth0_client.ini'
my_config = read_ini_file_to_dictionary(inifile)
config_dict = {}
config_dict['domain'] = my_config['PARAMETERS']['domain']
config_dict['client_id'] = my_config['PARAMETERS']['id']
config_dict['client_secret'] = my_config['PARAMETERS']['secret']
return config_dict
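# Expected layout of ~/.auth0_client/auth0_client.ini, inferred from the key
# lookups above (the values shown are placeholders):
#
#     [PARAMETERS]
#     domain = example.auth0.com
#     id = <client id>
#     secret = <client secret>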
|
import numpy as np
import pytest
from beast.observationmodel.extra_filters import make_integration_filter, make_top_hat_filter
@pytest.mark.parametrize(
"lambda_start,lambda_finish,d_lambda",
[(90., 913., 1.), (1000, 3000, 100)],
)
def test_extra_filters(lambda_start, lambda_finish, d_lambda):
# create example integration filter
f_int = make_integration_filter(
lambda_start, lambda_finish, d_lambda, "QION", observatory="Pseudo", instrument="Fake"
)
# test bandwidth and name
np.testing.assert_allclose(f_int.bandwidth, lambda_finish - lambda_start - d_lambda)
if not f_int.name == "Pseudo_Fake_QION":
raise AssertionError()
# create example top hat filters
f_top = make_top_hat_filter(
lambda_start, lambda_finish, d_lambda, "TOP", observatory="Pseudo", instrument="Fake"
)
np.testing.assert_allclose(f_top.bandwidth, lambda_finish - lambda_start - d_lambda)
if not f_top.name == "Pseudo_Fake_TOP":
raise AssertionError()
|
# urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
import errno
from socket import error as SocketError, timeout as SocketTimeout
from .util import resolve_cert_reqs, resolve_ssl_version, assert_fingerprint
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
try: # Compiled with SSL?
HTTPSConnection = object
class BaseSSLError(BaseException):
pass
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped, ssl_wrap_socket
from .exceptions import (
ClosedPoolError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
sock = socket.create_connection((self.host, self.port), self.timeout)
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host)
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
:param timeout:
Socket timeout in seconds for each individual connection, can be
a float. None disables timeout.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
scheme = 'http'
def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
block=False, headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
return HTTPConnection(host=self.host,
port=self.port,
strict=self.strict)
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
# Connection never got put back into the pool, close it.
conn.close()
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
"""
self.num_requests += 1
if timeout is _Default:
timeout = self.timeout
conn.timeout = timeout # This only does anything in Py26+
conn.request(method, url, **httplib_request_kw)
# Set timeout
sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
if sock:
sock.settimeout(timeout)
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if self.port and not port:
# Use explicit default port for comparison when none is given.
port = port_by_scheme.get(scheme)
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one request.
It may be a float (in seconds).
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError(self, url)
if timeout is _Default:
timeout = self.timeout
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries - 1)
conn = None
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty as e:
# Timed out by queue
raise TimeoutError(self, url,
"Request timed out. (pool_timeout=%s)" %
pool_timeout)
except SocketTimeout as e:
# Timed out by socket
raise TimeoutError(self, url,
"Request timed out. (timeout=%s)" %
timeout)
except BaseSSLError as e:
# SSL certificate error
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except (HTTPException, SocketError) as e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
raise MaxRetryError(self, url, e)
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`httplib.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None):
HTTPConnectionPool.__init__(self, host, port,
strict, timeout, maxsize,
block, headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not ssl: # Platform-specific: Python compiled without +ssl
if not HTTPSConnection or HTTPSConnection is object:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
return HTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection = VerifiedHTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
cert_reqs=self.cert_reqs, ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
connection.ssl_version = self.ssl_version
return connection
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
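# Usage sketch (illustrative, mirroring the docstring example above): pooled
# requests against one host via the low-level urlopen() call.
#
#     pool = HTTPConnectionPool('httpbin.org', maxsize=2)
#     r = pool.urlopen('GET', '/get')   # retries, redirects and pooling handled above
#     print(r.status)
#     pool.close()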
|
def now(ExTime = 600):
from ntptime import settime
import setting
import utime
import time
timeserver = setting.get('timeserver')
try:
print(utime.time())
timeset = False
nn = 0
while timeset == False:
try:
if timeserver =='':
settime()
timeset = True
else:
settime(timeserver)
timeset = True
except:
timeset = False
print('Try NTP')
nn += 1
if nn == 15:
timeset = True
time.sleep(1)
print(utime.time())
IOTHUB = setting.get('iothub')
DEVICE = setting.get('iotdevicename')
KEY = setting.get('iotdevicesecret')
USER = IOTHUB + '/' + DEVICE #+ '/api-version=2016-11-14'
        EXPIRES = utime.time() + 946684800 + ExTime # +30 years (2000-based MicroPython epoch -> Unix epoch) + 10 min
PASSWD = GenerateAzureSasToken(IOTHUB + '/devices/' + DEVICE, KEY, EXPIRES)
except:
PASSWD = ''
return PASSWD
def GenerateAzureSasToken(uri, key, expiryTimestamp, policy_name=None):
from ubinascii import a2b_base64, b2a_base64
def _quote(s) :
r = ''
for c in str(s) :
if (c >= 'a' and c <= 'z') or \
(c >= '0' and c <= '9') or \
(c >= 'A' and c <= 'Z') or \
(c in '.-_') :
r += c
else :
r += '%%%02X' % ord(c)
return r
uri = _quote(uri)
    sign_key = b'%s\n%d' % (uri.encode(), int(expiryTimestamp))  # encode: bytes %-formatting cannot take a str
key = a2b_base64(key)
hmac = HMACSha256(key, sign_key)
signature = _quote( b2a_base64(hmac).decode().strip() )
token = 'sr=' + uri + '&' + 'sig=' + signature + '&' + 'se=' + str(expiryTimestamp)
if policy_name :
token += '&' + 'skn=' + policy_name
return 'SharedAccessSignature ' + token
def HMACSha256(keyBin, msgBin) :
from uhashlib import sha256
block_size = 64 # SHA-256 blocks size
trans_5C = bytearray(256)
for x in range(len(trans_5C)) :
trans_5C[x] = x^0x5C
trans_36 = bytearray(256)
for x in range(len(trans_36)) :
trans_36[x] = x^0x36
def translate(d, t) :
res = bytearray(len(d))
for x in range(len(d)) :
res[x] = t[d[x]]
return res
    # pad the key with zero *bytes*; chr(0) yields a str, which cannot be concatenated to bytes
    keyBin = keyBin + b'\x00' * (block_size - len(keyBin))
inner = sha256()
inner.update(translate(keyBin, trans_36))
inner.update(msgBin)
inner = inner.digest()
outer = sha256()
outer.update(translate(keyBin, trans_5C))
outer.update(inner)
return outer.digest()
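# Usage sketch (hypothetical values): building a SAS token directly, exactly as
# now() does with the values read from settings.
#
#     token = GenerateAzureSasToken(
#         'myhub.azure-devices.net/devices/mydevice',  # resource URI
#         'bXlkZXZpY2VrZXk=',                          # base64 device key from the portal
#         1700000000)                                  # Unix expiry timestamp
#     # -> 'SharedAccessSignature sr=...&sig=...&se=1700000000'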
|
# pylint: disable=missing-module-docstring
import sys
from .viewer import main as _main
if __name__ == "__main__":
sys.exit(_main())
|
import logging
from functools import partial
from datetime import datetime, timedelta
from typing import Mapping
from pprint import pformat
import attr
from crud.abc import Endpoint, Serializable
from crud.exceptions import GatewayConnectionError
from ..utils import FuncByDates
from ..utils.gateways import Montage as MontageGateway
from ..dixel import Dixel
@attr.s(hash=False)
class Montage(Endpoint, Serializable):
name = attr.ib(default="Montage")
protocol = attr.ib(default="http")
host = attr.ib(default="localhost")
port = attr.ib(default=80)
    path = attr.ib(default="api/v1")
    user = attr.ib(default="montage")
    password = attr.ib(default="montage")
gateway = attr.ib(init=False, type=MontageGateway, repr=False)
@gateway.default
def setup_gateway(self):
return MontageGateway(
name = "MontageGateway",
protocol = self.protocol,
host = self.host,
port = self.port,
path = self.path,
user = self.user,
password = self.password
)
def find(self, query: Mapping, index="rad", ignore_errs=True, get_meta=False):
r = self.gateway.find(query=query, index=index)
ret = set()
for item in r:
try:
d = Dixel.from_montage_json(item)
if get_meta:
d = self.get_meta(d)
ret.add(d)
except Exception as e:
logger = logging.getLogger(self.name)
logger.warning("Failed to dixelize an item")
if not ignore_errs:
raise e
return ret
def get_meta(self, item: Dixel):
cpts = self.gateway.lookup_cpts(item.meta["MontageCPTCodes"])
body_part = self.gateway.lookup_body_part(item.meta["MontageCPTCodes"])
item.meta['CPTCodes'] = cpts
item.meta['BodyParts'] = body_part
return item
def check(self):
logger = logging.getLogger(self.name)
logger.debug("Check")
try:
return self.gateway._get("index") is not None
except GatewayConnectionError as e:
logger.warning("Failed to connect to Endpoint")
logger.error(type(e))
logger.error(e)
return False
def iter_query_by_date(self, q: Mapping,
start: datetime, stop: datetime, step: timedelta,
get_meta=False):
def qdt(q, _start, _stop):
if not q:
_q = {}
else:
_q = {**q}
__start = min(_start, _stop)
__stop = max(_start, _stop)
__stop -= timedelta(seconds=1)
_q["start_date"] = __start.date().isoformat()
_q["end_date"] = __stop.date().isoformat()
return _q
func = partial(qdt, q)
gen = FuncByDates(func, start, stop, step)
for qq in gen:
logging.debug(pformat(qq))
cache = self.find(qq, get_meta=get_meta)
for item in cache:
yield item
Montage.register()
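# Usage sketch (hypothetical host and credentials): checking connectivity and
# iterating a query week by week, as the generator above supports.
#
#     ep = Montage(host="montage.example.com", user="svc", password="...")
#     if ep.check():
#         for d in ep.iter_query_by_date({"q": "pneumothorax"},
#                                        start=datetime(2020, 1, 1),
#                                        stop=datetime(2020, 2, 1),
#                                        step=timedelta(days=7)):
#             print(d)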
|
#!/usr/bin/env python3
import wx, os, effects
from PIL import Image
class MainGUI(wx.Frame):
def __init__(self,parent,title):
wx.Frame.__init__(self,parent,title=title,size=(1000,600))
self.panel = wx.Panel(self)
self.filePath = ""
self.wildcard = "images (*.jpeg,*.jpg,*.png)|*.jpeg;*.jpg;*.png"
self.photoWidth = 600
self.photoHeight = 800
self.wxImage = wx.Image(self.photoWidth,self.photoHeight)
self.imageFrame = wx.StaticBitmap(self.panel,wx.ID_ANY,wx.Bitmap(self.wxImage))
self.PIL_image = None
#self.OG_bands = None
self.old_shift = 0
#File Menu
fileMenu = wx.Menu()
menuOpen = fileMenu.Append(wx.ID_OPEN,"Choose Image","Open an image to edit")
menuSave = fileMenu.Append(wx.ID_SAVE,"Save Image","Save the image over itself")
menuSaveAs = fileMenu.Append(wx.ID_SAVEAS,"Save Image As","Save a new image")
menuExit = fileMenu.Append(wx.ID_EXIT,"Exit","Close this program")
#Help Menu
helpMenu = wx.Menu()
menuAbout = helpMenu.Append(wx.ID_ABOUT,"About","Information about this program")
menu = wx.MenuBar()
menu.Append(fileMenu,"File")
menu.Append(helpMenu,"Help")
self.SetMenuBar(menu)
#Button
resetButton = wx.Button(self.panel,label="Reset Image")
resetButton.SetToolTip(wx.ToolTip('Reloads the image from disk. All effects will be lost'))
saveButton = wx.Button(self.panel,label="Save Image")
saveButton.SetToolTip(wx.ToolTip('Acts like File->Save Image As'))
openButton = wx.Button(self.panel,label="Open Image")
glitchButton = wx.Button(self.panel,label="Glitch")
glitchButton.SetToolTip(wx.ToolTip('This can glitch the image beyond recognition. Use at your own risk!'))
#StaticText
Xlabel = wx.StaticText(self.panel,label="X")
Ylabel = wx.StaticText(self.panel,label="Y")
Rlabel = wx.StaticText(self.panel,label="Red:")
Glabel = wx.StaticText(self.panel,label="Green:")
Blabel = wx.StaticText(self.panel,label="Blue:")
glitchAmountlabel = wx.StaticText(self.panel,label="Amount:")
Seedlabel = wx.StaticText(self.panel,label="Seed:")
Iterlabel = wx.StaticText(self.panel,label="Iteration(s):")
#SpinCtrl
self.RslideX = wx.SpinCtrl(self.panel,min=-500,max=500)
self.RslideY = wx.SpinCtrl(self.panel,min=-500,max=500)
self.GslideX = wx.SpinCtrl(self.panel,min=-500,max=500)
self.GslideY = wx.SpinCtrl(self.panel,min=-500,max=500)
self.BslideX = wx.SpinCtrl(self.panel,min=-500,max=500)
self.BslideY = wx.SpinCtrl(self.panel,min=-500,max=500)
self.glitchSeed = wx.SpinCtrl(self.panel,min=-0,max=99)
self.glitchSeed.SetToolTip(wx.ToolTip('This determines where in the image data the effect starts overwriting data. Value from 1 to 99'))
self.glitchIter = wx.SpinCtrl(self.panel,min=-0,max=115)
self.glitchIter.SetToolTip(wx.ToolTip('This determines how many times the effect overwrites data. Value from 1-115'))
self.glitchAmount = wx.SpinCtrl(self.panel,min=-0,max=99)
self.glitchAmount.SetToolTip(wx.ToolTip('This determines the hex value that is used to overwrite original values in the image data. Value from 1 to 99'))
self.RslideX.name = "redX"
self.RslideY.name = "redY"
self.GslideX.name = "greenX"
self.GslideY.name = "greenY"
self.BslideX.name = "blueX"
self.BslideY.name = "blueY"
#Layout
topLayout = wx.BoxSizer(wx.HORIZONTAL)
labelLayout = wx.BoxSizer(wx.HORIZONTAL)
controlLayout = wx.BoxSizer(wx.VERTICAL)
RLayout = wx.BoxSizer(wx.HORIZONTAL)
GLayout = wx.BoxSizer(wx.HORIZONTAL)
BLayout = wx.BoxSizer(wx.HORIZONTAL)
buttonLayout = wx.BoxSizer(wx.HORIZONTAL)
glitchControl = wx.BoxSizer(wx.HORIZONTAL)
labelLayout.AddSpacer(125)
labelLayout.Add(Xlabel,0,wx.ALL,5)
labelLayout.AddSpacer(125)
labelLayout.Add(Ylabel,0,wx.ALL,5)
RLayout.Add(Rlabel,0,wx.ALL|wx.ALIGN_LEFT,5)
RLayout.AddSpacer(12)
RLayout.Add(self.RslideX,0,wx.ALL|wx.ALIGN_RIGHT,5)
RLayout.Add(self.RslideY,0,wx.ALL|wx.ALIGN_RIGHT,5)
GLayout.Add(Glabel,0,wx.ALL|wx.ALIGN_LEFT,5)
GLayout.Add(self.GslideX,0,wx.ALL|wx.ALIGN_RIGHT,5)
GLayout.Add(self.GslideY,0,wx.ALL|wx.ALIGN_RIGHT,5)
BLayout.Add(Blabel,0,wx.ALL|wx.ALIGN_LEFT,5)
BLayout.AddSpacer(10)
BLayout.Add(self.BslideX,0,wx.ALL|wx.ALIGN_RIGHT,5)
BLayout.Add(self.BslideY,0,wx.ALL|wx.ALIGN_RIGHT,5)
glitchControl.Add(glitchAmountlabel,0,wx.ALL|wx.ALIGN_LEFT|wx.ALIGN_CENTRE,5)
glitchControl.Add(self.glitchAmount,0,wx.ALL|wx.ALIGN_RIGHT,5)
glitchControl.Add(Seedlabel,0,wx.ALL|wx.ALIGN_LEFT|wx.ALIGN_CENTRE,5)
glitchControl.Add(self.glitchSeed,0,wx.ALL|wx.ALIGN_RIGHT,5)
glitchControl.Add(Iterlabel,0,wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTRE,5)
glitchControl.Add(self.glitchIter,0,wx.ALL|wx.ALIGN_RIGHT,5)
glitchControl.Add(glitchButton,5,wx.ALL|wx.ALIGN_CENTRE)
buttonLayout.Add(openButton,5)
buttonLayout.Add(saveButton,5)
buttonLayout.Add(resetButton,5)
controlLayout.Add(labelLayout,0,wx.ALL|wx.EXPAND,5)
controlLayout.Add(RLayout,0,wx.ALL|wx.EXPAND,5)
controlLayout.Add(GLayout,0,wx.ALL|wx.EXPAND,5)
controlLayout.Add(BLayout,0,wx.ALL|wx.EXPAND,5)
controlLayout.Add(glitchControl,0,wx.ALL|wx.EXPAND,5)
controlLayout.Add(buttonLayout,0,wx.ALL|wx.EXPAND,5)
topLayout.Add(controlLayout,0,wx.ALL|wx.EXPAND,5)
topLayout.Add(self.imageFrame,0,wx.ALL|wx.EXPAND,5)
self.panel.SetSizer(topLayout)
topLayout.Fit(self)
#Events
self.Bind(wx.EVT_MENU,self.OnOpen,menuOpen)
self.Bind(wx.EVT_MENU,self.OnAbout,menuAbout)
self.Bind(wx.EVT_MENU,self.OnExit,menuExit)
self.Bind(wx.EVT_MENU,self.OnSave,menuSave)
self.Bind(wx.EVT_MENU,self.OnSaveAs,menuSaveAs)
self.Bind(wx.EVT_SPINCTRL,self.shiftColors,self.RslideX)
self.Bind(wx.EVT_SPINCTRL,self.shiftColors,self.RslideY)
self.Bind(wx.EVT_SPINCTRL,self.shiftColors,self.GslideX)
self.Bind(wx.EVT_SPINCTRL,self.shiftColors,self.GslideY)
self.Bind(wx.EVT_SPINCTRL,self.shiftColors,self.BslideX)
self.Bind(wx.EVT_SPINCTRL,self.shiftColors,self.BslideY)
self.Bind(wx.EVT_BUTTON,self.resetImage,resetButton)
self.Bind(wx.EVT_BUTTON,self.OnSaveAs,saveButton)
self.Bind(wx.EVT_BUTTON,self.OnOpen,openButton)
self.Bind(wx.EVT_BUTTON,self.glitchImage,glitchButton)
self.Show(True)
def glitchImage(self,e):
if self.PIL_image is not None:
seed = self.glitchSeed.GetValue()
iterations = self.glitchIter.GetValue()
amount = self.glitchAmount.GetValue()
if seed != 0 and iterations != 0 and amount != 0:
self.PIL_image = effects.glitch(self.PIL_image,amount,seed,iterations)
self.showImage(None,False)
def shiftColors(self,e):
if self.PIL_image is not None:
spin = e.GetEventObject()
color = spin.name[:-1]
if spin.GetValue() > self.old_shift:
shift_value = 1
else:
shift_value = -1
self.old_shift = spin.GetValue()  # remember the latest value so the next event can infer direction
if spin.name.endswith("X"):
self.PIL_image = effects.shiftColor(self.PIL_image,color,shift_value,0)
else:
self.PIL_image = effects.shiftColor(self.PIL_image,color,0,shift_value)
self.showImage(None,False)
def resetImage(self,e):
self.showImage(self.filePath,True)
self.resetSliders()
def resetSliders(self):
self.RslideX.SetValue(0)
self.RslideY.SetValue(0)
self.GslideX.SetValue(0)
self.GslideY.SetValue(0)
self.BslideX.SetValue(0)
self.BslideY.SetValue(0)
self.glitchSeed.SetValue(0)
self.glitchIter.SetValue(0)
self.glitchAmount.SetValue(0)
def OnOpen(self,e):
with wx.FileDialog(self,"Choose a file",defaultDir=os.path.expanduser('~'),wildcard=self.wildcard,style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as dlg:
if dlg.ShowModal() == wx.ID_OK:
self.resetSliders()
self.filePath = dlg.GetPath()
self.showImage(self.filePath,True)
def OnSave(self,e):
self.SaveImage(self.PIL_image,self.filePath)
def OnSaveAs(self,e):
dlg = wx.FileDialog(self,"Save Image", wildcard=self.wildcard,style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
result = dlg.ShowModal()
path = dlg.GetPath()
dlg.Destroy()
if result == wx.ID_OK:
self.SaveImage(self.PIL_image,path)
def OnAbout(self,e):
dlg = wx.MessageDialog(self,("A practice image editing application made using Python and wxWidgets\n\n"
"https://gitlab.com/bunu/aesthetic-image"),"About Progam",wx.OK)
dlg.ShowModal()
dlg.Destroy()
def OnExit(self,e):
self.Close(True)
def SaveImage(self,imageData,directory):
try:
imageData.save(directory)
except PermissionError as p:
dlg = wx.MessageDialog(self,"You do not have permission to save to this location\n\n" + repr(p),"Permission Error",wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
except IOError as e:
dlg = wx.MessageDialog(self,"Something went wrong when writing to this location\n\n" + repr(e),"I/O Error",wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
except ValueError as v:
dlg = wx.MessageDialog(self,"Add a file extension to the end of name such as .jpg or .png\n\n" + repr(v),"I/O Error",wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
except AttributeError:
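# imageData is None when no image has been opened yet; None.save(...) raises
# AttributeError, which is deliberately ignored.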
pass
def showImage(self,filepath,fromOpen):
if fromOpen:
img = wx.Image(filepath,wx.BITMAP_TYPE_ANY)
self.PIL_image = Image.open(filepath)
dim = self.findScale(img.GetWidth(),img.GetHeight())
img.Rescale(dim[0],dim[1])
else:
img = effects.PIL_to_wx(self.PIL_image)
img = wx.Bitmap.ConvertToImage(img)
dim = self.findScale(img.GetWidth(),img.GetHeight())
img.Rescale(dim[0],dim[1])
self.imageFrame.SetBitmap(wx.Bitmap(img))
self.panel.Refresh()
def findScale(self,w,h):
h_ratio = h/self.photoHeight
w_ratio = w/self.photoWidth
if h_ratio > w_ratio:
return (int(w/h_ratio),int(h/h_ratio))
else:
return (int(w/w_ratio),int(h/w_ratio))
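# Worked example (assuming an illustrative 250x250 display frame): a 1000x500
# image gives w_ratio = 4.0 and h_ratio = 2.0, so the else branch returns
# (1000/4, 500/4) = (250, 125), fitting the frame while preserving aspect ratio.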
if __name__ == '__main__':
app = wx.App(False)
gui = MainGUI(None,"aesthetic-image")
app.MainLoop()
|
class Agent(object):
def __init__(self, init_pos = [1,1], goal_pos = [11,11], normal_pos = [11,2], bad_pos = [6,11]):
self.pos = [init_pos[0], init_pos[1]]
self.goal_pos = goal_pos
self.normal_goal_pos = normal_pos
self.bad_goal_pos = bad_pos
self.action_space = 4
self.done = False
def action(self,a,d):
if a == 0 and d[0]:
self.pos[1] += 1
elif a == 1 and d[1]:
self.pos[1] -= 1
elif a == 2 and d[2]:
self.pos[0] += 1
elif a == 3 and d[3]:
self.pos[0] -= 1
else:
pass
def get_state(self):
state = self.pos[0]*13 + self.pos[1]
return state
def check_done(self):
if self.pos[0] == self.goal_pos[0] and self.pos[1] == self.goal_pos[1]:
done = True
elif self.pos[0] == self.normal_goal_pos[0] and self.pos[1] == self.normal_goal_pos[1]:
done = True
elif self.pos[0] == self.bad_goal_pos[0] and self.pos[1] == self.bad_goal_pos[1]:
done = True
else:
done = False
return done
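if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; the availability mask `d` normally
    # comes from the surrounding environment, which is not shown here).
    agent = Agent()
    agent.action(2, [True, True, True, True])  # move +x when allowed
    print(agent.get_state())   # pos [2, 1] encodes to 2*13 + 1 = 27
    print(agent.check_done())  # False until a goal cell is reached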
"""
class Agent():
def __init__(self, start=[1,1], goal=[11,11], normal_goal=[11,2], bad_goal=[6,11]):
self.start = start
self.goal_pos = goal_pos
self.normal_goal_pos = normal_pos
self.bad_goal_pos = bad_pos
self.action_space = 4
def action(self):
s
"""
|
import numpy
import pytest
from ..adapters.array import ArrayAdapter
from ..adapters.mapping import MapAdapter
from ..client import from_tree
from ..queries import FullText
tree = MapAdapter(
{
"a": ArrayAdapter.from_array(
numpy.arange(10), metadata={"apple": "red", "animal": "dog"}
),
"b": ArrayAdapter.from_array(
numpy.arange(10), metadata={"banana": "yellow", "animal": "dog"}
),
"c": ArrayAdapter.from_array(
numpy.arange(10), metadata={"cantalope": "orange", "animal": "cat"}
),
}
)
@pytest.mark.parametrize(
"term, expected_keys",
[
("red", ["a"]),
("yellow", ["b"]),
("orange", ["c"]),
("dog", ["a", "b"]),
("cat", ["c"]),
],
)
def test_search(term, expected_keys):
client = from_tree(tree)
query = FullText(term)
results = client.search(query)
assert list(results) == expected_keys
def test_compound_search():
client = from_tree(tree)
results = client.search(FullText("dog")).search(FullText("yellow"))
assert list(results) == ["b"]
def test_key_into_results():
client = from_tree(tree)
results = client.search(FullText("dog"))
assert "apple" in results["a"].metadata
assert "banana" in results["b"].metadata
assert "c" not in results # This *is* in the tree but not among the results.
|
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import recipe_test_api
from .api import EnsureFile
class CIPDTestApi(recipe_test_api.RecipeTestApi):
EnsureFile = EnsureFile
def make_resolved_version(self, v):
if not v:
return '40-chars-fake-of-the-package-instance_id'
if len(v) == 40:
return v
# Truncate or pad to 40 chars.
prefix = 'resolved-instance_id-of-'
if len(v) + len(prefix) >= 40:
return '%s%s' % (prefix, v[:40-len(prefix)])
return '%s%s%s' % (prefix, v, '-' * (40 - len(prefix) - len(v)))
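# e.g. make_resolved_version('deadbeef') -> 'resolved-instance_id-of-deadbeef--------'
# (24-char prefix + 8-char value, padded with 8 dashes to reach 40 chars).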
def make_pin(self, package_name, version=None):
return {
'package': package_name.replace('${platform}', 'resolved-platform'),
'instance_id': self.make_resolved_version(version),
}
def _resultify(self, result, error=None, retcode=None):
dic = {'result': result}
if error:
dic['error'] = error
return self.m.json.output(dic, retcode=retcode)
def example_error(self, error, retcode=None):
return self._resultify(
result=None,
error=error,
retcode=1 if retcode is None else retcode)
def example_acl_check(self, package_path, check=True):
return self._resultify(check)
def example_build(self, package_name, version=None):
return self._resultify(self.make_pin(package_name, version))
example_register = example_build
example_pkg_fetch = example_build
example_pkg_deploy = example_build
def example_ensure(self, ensure_file):
return self._resultify({
subdir or '': [self.make_pin(name, version)
for name, version in sorted(packages)]
for subdir, packages in ensure_file.packages.iteritems()
})
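# e.g. an ensure file with packages {'bin': [('tool/cli', 'latest')]} would
# resultify to {'result': {'bin': [<pin for tool/cli @ latest>]}} (hypothetical example).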
def example_set_tag(self, package_name, version):
return self._resultify([{
'package': package_name,
'pin': self.make_pin(package_name, version)
}])
def example_set_metadata(self, package_name, version):
return self._resultify([{
'package': package_name,
'pin': self.make_pin(package_name, version)
}])
def example_set_ref(self, package_name, version):
return self._resultify({'': [{
'package': package_name,
'pin': self.make_pin(package_name, version)
}]})
def example_search(self, package_name, instances=None):
if instances is None:
# Return one instance by default.
return self._resultify([self.make_pin(package_name)])
if isinstance(instances, int):
instances = ['instance_id_%i' % (i+1) for i in xrange(instances)]
return self._resultify([self.make_pin(package_name, instance)
for instance in instances])
def example_describe(self, package_name, version=None,
test_data_refs=None, test_data_tags=None,
user='user:44-blablbla@developer.gserviceaccount.com',
tstamp=1446574210):
assert not test_data_tags or all(':' in tag for tag in test_data_tags)
if test_data_tags is None:
test_data_tags = [
'buildbot_build:some.waterfall/builder/1234',
'git_repository:https://chromium.googlesource.com/some/repo',
'git_revision:397a2597cdc237f3026e6143b683be4b9ab60540',
]
if test_data_refs is None:
test_data_refs = ['latest']
# If the user explicitly passed empty tags/refs (i.e. ())
if not test_data_refs and not test_data_tags:
# quick and dirty version differentiation
if ':' in version:
return self._resultify(None, error='no such tag', retcode=1)
if len(version) == 44 or len(version) == 40:
return self._resultify(None, error='no such instance', retcode=1)
return self._resultify(None, error='no such ref', retcode=1)
return self._resultify({
'pin': self.make_pin(package_name, version),
'registered_by': user,
'registered_ts': tstamp,
'refs': [
{
'ref': ref,
'modified_by': user,
'modified_ts': tstamp,
'instance_id': self.make_resolved_version(ref),
}
for ref in test_data_refs
],
'tags': [
{
'tag': tag,
'registered_by': user,
'registered_ts': tstamp,
}
for tag in test_data_tags
],
})
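# With the default arguments, the resultified dict carries 'pin', 'registered_by'
# and 'registered_ts', plus one entry per ref in 'refs' and per tag in 'tags'.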
|
import logging
from typing import Dict
from django.contrib.auth.models import User, Group
from django.test import TestCase, client
from rest_framework.reverse import reverse_lazy
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework import status
from django.conf import settings
from django.urls import URLResolver, URLPattern, exceptions
from django.core.mail import send_mail
from django.test.utils import override_settings
logger = logging.getLogger(__name__)
root_urlconf = __import__(settings.ROOT_URLCONF) # import root_urlconf module
VIEW_NAMES = [] # maintain a global list
detail_views_list = []
def get_all_view_names(urlpatterns, namespace=None):
for pattern in urlpatterns:
if isinstance(pattern, URLResolver):
if hasattr(pattern, "namespace") and pattern.namespace is not None:
namespace = pattern.namespace
get_all_view_names(pattern.url_patterns, namespace)
elif isinstance(pattern, URLPattern):
if pattern.name is not None:
name = pattern.name
if namespace is not None:
name = namespace + ":" + name
detail_views_list.append(name)
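# e.g. a URLPattern named "detail" inside the "api" namespace is recorded as "api:detail".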
@override_settings(EMAIL_BACKEND="django.core.mail.backends.smtp.EmailBackend")
class TestEmailVerification(TestCase):
    def test_send_mail(self):
        # Run send_mail inside a test method so it executes during the test
        # run rather than at class-definition (import) time.
        send_mail(
            "test_email_simple from django",
            "Here is the message.",
            "db-ultimate@frisbeeverband.at",
            ["flokain11@gmail.com"],
        )
class TestApiStartSession(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.client = APIClient()
self.groups: Dict[str, Group] = {}
self.groups["club_admin"], _ = Group.objects.get_or_create(name="club_admin")
self.groups["association_admin"], _ = Group.objects.get_or_create(name="association_admin")
self.users = {}
self.users["admin"] = User.objects.create(
username="admin", email="admin@admin.com", password="admin", is_superuser=True
)
self.users["club_admin"] = User.objects.create(
username="club_admin", email="club-admin@club-admin.com", password="club_admin"
)
def test_Session_Start(self):
response = self.client.post(
"/api/auth/login/", {"email": "admin@admin.com", "password": "admin"}, format="json"
)
self.assertTrue(status.is_success(response.status_code))
def test_all_urlpatterns(self):
all_urlpatterns = __import__(settings.ROOT_URLCONF).urls.urlpatterns
get_all_view_names(all_urlpatterns)
all_views_list = []
# remove redundant entries and specific ones we don't care about
for each in detail_views_list:
if each not in "serve add_view change_view changelist_view history_view delete_view RedirectView":
if each not in all_views_list:
all_views_list.append(each)
print(all_views_list)
for view in all_views_list:
try:
response = self.client.get(reverse_lazy(view))
logger.debug(response.status_code)
self.assertTrue(
status.is_success(response.status_code)
or response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
)
except exceptions.NoReverseMatch:
pass
|
#!/usr/bin/env python
from setuptools import setup
from glob import glob
packages = ['sldr']
scripts = set(filter(lambda x: x.rfind(".") == -1, glob('scripts/*')))
setup(name='sldr',
version='0.7.5',
description='python package and scripts for working with SLDR',
long_description="""Modules and scripts useful for working with SLDR.""",
maintainer='David Rowe',
maintainer_email='david_rowe@sil.org',
url='http://github.com/silnrsi/sldrtools',
packages=packages,
scripts=scripts,
license='LGPL',
platforms=['Linux', 'Win32', 'Mac OS X'],
package_dir={'': 'lib'},
package_data={'sldr': ['allkeys.txt',
'language-subtag-registry.txt',
'likelySubtags.xml',
'sil.dtd',
'supplementalData.xml',
'supplementalMetadata.xml']}
)
|
# import math
# import librosa
import torch
import pickle
# import torch.nn as nn
# from torch_stft import STFT
# from nemo import logging
from nemo.collections.asr.parts.perturb import AudioAugmentor
# from nemo.collections.asr.parts.segment import AudioSegment
class RpycWaveformFeaturizer(object):
def __init__(
self, sample_rate=16000, int_values=False, augmentor=None, rpyc_conn=None
):
self.augmentor = augmentor if augmentor is not None else AudioAugmentor()
self.sample_rate = sample_rate
self.int_values = int_values
self.remote_path_samples = rpyc_conn.get_path_samples if rpyc_conn is not None else None
def max_augmentation_length(self, length):
return self.augmentor.max_augmentation_length(length)
def process(self, file_path, offset=0, duration=0, trim=False):
audio = self.remote_path_samples(
file_path,
target_sr=self.sample_rate,
int_values=self.int_values,
offset=offset,
duration=duration,
trim=trim,
)
return torch.tensor(pickle.loads(audio), dtype=torch.float)
def process_segment(self, audio_segment):
self.augmentor.perturb(audio_segment)
return torch.tensor(audio_segment.samples, dtype=torch.float)
@classmethod
def from_config(cls, input_config, perturbation_configs=None):
if perturbation_configs is not None:
aa = AudioAugmentor.from_config(perturbation_configs)
else:
aa = None
sample_rate = input_config.get("sample_rate", 16000)
int_values = input_config.get("int_values", False)
return cls(sample_rate=sample_rate, int_values=int_values, augmentor=aa)
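# Minimal usage sketch (illustrative only; assumes an rpyc service whose root
# exposes get_path_samples(...) returning pickled float samples):
# import rpyc
# conn = rpyc.connect("localhost", 18861).root
# featurizer = RpycWaveformFeaturizer(sample_rate=16000, rpyc_conn=conn)
# samples = featurizer.process("/data/audio.wav", duration=2.0)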
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
import io
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
hex_str_to_bytes,
)
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
# fund rbf node with 25 outputs of 0.001 btc (100,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(self, rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
test_change_script_match(rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
# These tests wipe out a number of utxos that are expected in other tests
test_small_output_with_feerate_succeeds(rbf_node, dest_address)
test_no_more_inputs_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(self, rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
self.sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='p2sh-segwit'))
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_small_output_with_feerate_succeeds(rbf_node, dest_address):
# Make sure additional inputs exist
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = spend_one_input(rbf_node, dest_address)
original_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_equal(len(original_input_list), 1)
original_txin = original_input_list[0]
# Keep bumping until we out-spend change output
tx_fee = 0
while tx_fee < Decimal("0.0005"):
new_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
new_item = list(new_input_list)[0]
assert_equal(len(original_input_list), 1)
assert_equal(original_txin["txid"], new_item["txid"])
assert_equal(original_txin["vout"], new_item["vout"])
rbfid_new_details = rbf_node.bumpfee(rbfid)
rbfid_new = rbfid_new_details["txid"]
raw_pool = rbf_node.getrawmempool()
assert rbfid not in raw_pool
assert rbfid_new in raw_pool
rbfid = rbfid_new
tx_fee = rbfid_new_details["origfee"]
# input(s) have been added
final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_greater_than(len(final_input_list), 1)
# Original input is in final set
assert [txin for txin in final_input_list
if txin["txid"] == original_txin["txid"]
and txin["vout"] == original_txin["vout"]]
rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx requests totalFee=48,200 (50,000 - 1,800), and the now-dust change output is converted to fee, giving a final fee of 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
# (32-byte p2sh-p2wpkh output size + 148-byte p2pkh spend estimate) * 10000 (discard_rate) / 1000 = 1800 satoshis
# P2SH outputs are slightly "over-discarded" because the IsDust calculation
# assumes they will be spent as P2PKH.
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000 - 1800})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
def test_settxfee(rbf_node, dest_address):
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_maxtxfee_fails(test, rbf_node, dest_address):
test.restart_node(1, ['-maxtxfee=0.00003'] + test.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Unable to create transaction: Fee exceeds maximum configured by -maxtxfee", rbf_node.bumpfee, rbfid)
test.restart_node(1, test.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
assert rbf_node.getbalance() < 49
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_change_script_match(rbf_node, dest_address):
"""Test that the same change addresses is used for the replacement transaction when possible."""
def get_change_address(tx):
tx_details = rbf_node.getrawtransaction(tx, 1)
txout_addresses = [txout['scriptPubKey']['addresses'][0] for txout in tx_details["vout"]]
return [address for address in txout_addresses if rbf_node.getaddressinfo(address)["ischange"]]
# Check that there is only one change output
rbfid = spend_one_input(rbf_node, dest_address)
change_addresses = get_change_address(rbfid)
assert_equal(len(change_addresses), 1)
# Now find that address in each subsequent tx, and no other change
bumped_total_tx = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
destinations = {dest_address: Decimal("0.00050000")}
if change_size > 0:
destinations[node.getrawchangeaddress()] = change_size
rawtx = node.createrawtransaction([tx_input], destinations)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
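# With the defaults above, a 0.00100000 input minus 0.00050000 to dest and
# 0.00049000 change leaves an implicit fee of 0.00001000 BTC (1,000 satoshis).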
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(block.serialize().hex())
return block
def test_no_more_inputs_fails(rbf_node, dest_address):
# feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
rbf_node.generatetoaddress(1, dest_address)
# spend all funds, no change output
rbfid = rbf_node.sendtoaddress(rbf_node.getnewaddress(), rbf_node.getbalance(), "", "", True)
assert_raises_rpc_error(-4, "Unable to create transaction: Insufficient funds", rbf_node.bumpfee, rbfid)
if __name__ == "__main__":
BumpFeeTest().main()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
import os
import unittest
import tempfile
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import supported_api_version, ResourceType
from azure.cli.testsdk import (
ScenarioTest, LiveScenarioTest, LocalContextScenarioTest, ResourceGroupPreparer, StorageAccountPreparer, live_only,
record_only)
from knack.util import CLIError
from msrestazure.tools import resource_id
from .credential_replacer import ExpressRoutePortLOAContentReplacer
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
class NetworkApplicationSecurityGroupScenario(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_asg')
def test_network_asg(self, resource_group):
self.kwargs.update({
'asg': 'asg1'
})
count1 = len(self.cmd('network asg list').get_output_in_json())
self.cmd('network asg create -g {rg} -n {asg} --tags foo=doo',
checks=self.check('tags.foo', 'doo'))
self.cmd('network asg update -g {rg} -n {asg} --tags foo=bar',
checks=self.check('tags.foo', 'bar'))
count2 = len(self.cmd('network asg list').get_output_in_json())
self.assertTrue(count2 == count1 + 1)
self.cmd('network asg show -g {rg} -n {asg}', checks=[
self.check('name', '{asg}'),
self.check('resourceGroup', '{rg}'),
self.check('tags.foo', 'bar')
])
self.cmd('network asg delete -g {rg} -n {asg}')
count3 = len(self.cmd('network asg list').get_output_in_json())
self.assertTrue(count3 == count1)
class NetworkLoadBalancerWithSku(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_lb_sku')
def test_network_lb_sku(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'sku': 'standard',
'location': 'eastus2',
'ip': 'pubip1'
})
self.cmd('network lb create -g {rg} -l {location} -n {lb} --sku {sku} --public-ip-address {ip}')
self.cmd('network lb show -g {rg} -n {lb}', checks=[
self.check('sku.name', 'Standard')
])
self.cmd('network public-ip show -g {rg} -n {ip}', checks=[
self.check('sku.name', 'Standard'),
self.check('publicIpAllocationMethod', 'Static')
])
class NetworkPrivateEndpoints(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_private_endpoints')
def test_network_private_endpoints(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'sku': 'Standard',
'vnet': 'vnet1',
'subnet1': 'subnet1',
'subnet2': 'subnet2',
'location': 'centralus',
'ip': 'pubip1',
'lks1': 'lks1',
'lks2': 'lks2',
'pe': 'pe1',
'rg': resource_group
})
# Create PLS
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet1} -l {location}')
self.cmd('network lb create -g {rg} -l {location} -n {lb} --public-ip-address {ip} --sku {sku}')
self.cmd('network vnet subnet update -g {rg} -n {subnet1} --vnet-name {vnet} --disable-private-link-service-network-policies')
self.cmd('network vnet subnet create -g {rg} -n {subnet2} --vnet-name {vnet} --address-prefixes 10.0.2.0/24')
self.cmd('network vnet subnet update -g {rg} -n {subnet2} --vnet-name {vnet} --disable-private-endpoint-network-policies')
pls1 = self.cmd('network private-link-service create -g {rg} -n {lks1} --vnet-name {vnet} --subnet {subnet1} --lb-name {lb} --lb-frontend-ip-configs LoadBalancerFrontEnd -l {location}', checks=[
self.check('type', 'Microsoft.Network/privateLinkServices'),
self.check('provisioningState', 'Succeeded'),
self.check('name', self.kwargs['lks1'])
]).get_output_in_json()
self.kwargs['pls_id'] = pls1['id']
self.cmd('network private-endpoint list-types -l {location}')
self.cmd('network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet2} --private-connection-resource-id {pls_id} --connection-name tttt -l {location}', checks=[
self.check('name', 'pe1'),
self.check('provisioningState', 'Succeeded')
])
# temporarily disable the test
'''
self.cmd('network private-endpoint update -g {rg} -n {pe} --request-message "test"', checks=[
self.check('privateLinkServiceConnections[0].requestMessage', 'test')
])
'''
self.cmd('network private-endpoint list')
self.cmd('network private-endpoint list -g {rg}', checks=[
self.check('length(@)', 1)
])
pe_connection_name = self.cmd('network private-link-service show -g {rg} -n {lks1}').get_output_in_json()['privateEndpointConnections'][0]['name']
self.kwargs['pe_connect'] = pe_connection_name
self.cmd('network private-link-service connection update -g {rg} -n {pe_connect} --service-name {lks1} --connection-status Rejected --action-required "need action"')
self.cmd('network private-endpoint show -g {rg} -n {pe}', checks=[
self.check('privateLinkServiceConnections[0].privateLinkServiceConnectionState.status', 'Rejected'),
self.check('privateLinkServiceConnections[0].privateLinkServiceConnectionState.actionsRequired', "need action")
])
self.cmd('network private-link-service connection delete -g {rg} -n {pe_connect} --service-name {lks1}')
self.cmd('network private-link-service show -g {rg} -n {lks1}', checks=[
self.check('length(privateEndpointConnections)', 0)
])
self.cmd('network private-endpoint delete -g {rg} -n {pe}')
@ResourceGroupPreparer(name_prefix='fanqiu_cli_test_network_private_endpoints', location='CentralUSEuap')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2')
def test_network_private_endpoint_private_dns_zone_group(self, resource_group, storage_account):
from msrestazure.azure_exceptions import CloudError
self.kwargs.update({
'sa': storage_account,
'loc': 'CentralUSEuap',
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24),
'zone_name1': 'www.clizone1.com',
'zone_name2': 'www.clizone2.com',
'private_dns_zone_group_name': 'clidnsgroup',
'private_zone_name1': 'clizone1',
'private_zone_name2': 'clizone2'
})
# Prepare network
self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pr = self.cmd('storage account private-link-resource list --account-name {sa} -g {rg}').get_output_in_json()
self.kwargs['group_id'] = pr[0]['groupId']
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.kwargs['sa_id'] = storage['id']
private_endpoint = self.cmd(
'network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {sa_id} '
'--group-ids blob').get_output_in_json()
self.assertEqual(private_endpoint['name'], self.kwargs['pe'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], self.kwargs['pe_connection'])
self.assertEqual(
private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'],
'Approved')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], self.kwargs['group_id'])
self.kwargs['pe_id'] = private_endpoint['privateLinkServiceConnections'][0]['id']
# Show the connection at storage account
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.assertIn('privateEndpointConnections', storage)
self.assertEqual(len(storage['privateEndpointConnections']), 1)
self.assertEqual(storage['privateEndpointConnections'][0]['privateLinkServiceConnectionState']['status'],
'Approved')
self.kwargs['sa_pec_id'] = storage['privateEndpointConnections'][0]['id']
self.kwargs['sa_pec_name'] = storage['privateEndpointConnections'][0]['name']
self.cmd('storage account private-endpoint-connection show --account-name {sa} -g {rg} --name {sa_pec_name}',
checks=self.check('id', '{sa_pec_id}'))
self.cmd('network private-endpoint show -g {rg} -n {pe}', checks=[
self.check('length(customDnsConfigs)', 1)
])
self.cmd('network private-dns zone create -n {zone_name1} -g {rg}')
self.cmd('network private-dns zone create -n {zone_name2} -g {rg}')
self.cmd('network private-endpoint dns-zone-group create --endpoint-name {pe} -g {rg} -n {private_dns_zone_group_name} '
'--zone-name {private_zone_name1} --private-dns-zone {zone_name1}',
checks=[
self.check('name', '{private_dns_zone_group_name}')
])
self.cmd('network private-endpoint dns-zone-group add --endpoint-name {pe} -g {rg} -n {private_dns_zone_group_name} '
'--zone-name {private_zone_name2} --private-dns-zone {zone_name2}',
checks=[
self.check('length(privateDnsZoneConfigs)', 2)
])
self.cmd('network private-endpoint dns-zone-group show --endpoint-name {pe} -g {rg} -n {private_dns_zone_group_name}', checks=[
self.check('length(privateDnsZoneConfigs)', 2)
])
self.cmd('network private-endpoint dns-zone-group list --endpoint-name {pe} -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('network private-endpoint dns-zone-group remove --endpoint-name {pe} -g {rg} -n {private_dns_zone_group_name} '
'--zone-name {private_zone_name2}',
checks=[
self.check('length(privateDnsZoneConfigs)', 1)
])
self.cmd('network private-endpoint dns-zone-group show --endpoint-name {pe} -g {rg} -n {private_dns_zone_group_name}', checks=[
self.check('length(privateDnsZoneConfigs)', 1)
])
self.cmd('network private-endpoint dns-zone-group delete --endpoint-name {pe} -g {rg} -n {private_dns_zone_group_name}')
class NetworkPrivateLinkService(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_private_link_service')
def test_network_private_link_service(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'sku': 'Standard',
'vnet': 'vnet1',
'subnet1': 'subnet1',
'subnet2': 'subnet2',
'location': 'centralus',
'ip': 'pubip1',
'lks1': 'lks1',
'lks2': 'lks2',
'sub1': '00000000-0000-0000-0000-000000000000'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet1} -l {location}')
self.cmd('network lb create -g {rg} -l {location} -n {lb} --public-ip-address {ip} --sku {sku}')
self.cmd('network vnet subnet update -g {rg} -n {subnet1} --vnet-name {vnet} --disable-private-link-service-network-policies')
self.cmd('network vnet subnet create -g {rg} -n {subnet2} --vnet-name {vnet} --address-prefixes 10.0.2.0/24')
self.cmd('network vnet subnet update -g {rg} -n {subnet2} --vnet-name {vnet} --disable-private-endpoint-network-policies')
self.cmd('network private-link-service create -g {rg} -n {lks1} --vnet-name {vnet} --subnet {subnet1} --lb-name {lb} --lb-frontend-ip-configs LoadBalancerFrontEnd -l {location} --enable-proxy-protocol', checks=[
self.check('type', 'Microsoft.Network/privateLinkServices'),
self.check('length(ipConfigurations)', 1),
self.check('length(loadBalancerFrontendIpConfigurations)', 1),
self.check('enableProxyProtocol', True)
])
self.cmd('network private-link-service update -g {rg} -n {lks1} --visibility {sub1} {sub1} --auto-approval {sub1} {sub1} --enable-proxy-protocol False', checks=[
self.check('length(visibility.subscriptions)', 2),
self.check('length(autoApproval.subscriptions)', 2),
self.check('enableProxyProtocol', False)
])
self.cmd('network private-link-service list -g {rg}', checks=[
self.check('length(@)', 1),
self.check('@[0].type', 'Microsoft.Network/privateLinkServices')
])
self.cmd('network private-link-service show -g {rg} -n {lks1}', checks=[
self.check('type', 'Microsoft.Network/privateLinkServices'),
self.check('length(ipConfigurations)', 1),
self.check('length(loadBalancerFrontendIpConfigurations)', 1)
])
self.cmd('network private-link-service delete -g {rg} -n {lks1}')
self.cmd('network vnet subnet update -g {rg} -n {subnet1} --vnet-name {vnet} --disable-private-link-service-network-policies false', checks=[
self.check('privateLinkServiceNetworkPolicies', 'Enabled')
])
class NetworkLoadBalancerWithZone(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_lb_zone')
def test_network_lb_zone(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'lb2': 'lb4',
'lb3': 'lb5',
'zone': '2',
'location': 'eastus2',
'ip': 'pubip1',
'ip2': 'pubip2'
})
# LB with public ip
self.cmd('network lb create -g {rg} -l {location} -n {lb} --public-ip-zone {zone} --public-ip-address {ip}')
# No zone on LB and its front-ip-config
self.cmd('network lb show -g {rg} -n {lb}', checks=[
self.check("frontendIpConfigurations[0].zones", None),
self.check("zones", None)
])
# Zone on public-ip which LB uses to infer the zone
self.cmd('network public-ip show -g {rg} -n {ip}', checks=[
self.check('zones[0]', self.kwargs['zone'])
])
# LB w/o public ip, so called ILB
self.kwargs['lb'] = 'lb2'
self.cmd('network lb create -g {rg} -l {location} -n {lb} --frontend-ip-zone {zone} --public-ip-address "" --vnet-name vnet1 --subnet subnet1')
# Zone on front-ip-config, and still no zone on LB resource
self.cmd('network lb show -g {rg} -n {lb}', checks=[
self.check("frontendIpConfigurations[0].zones[0]", self.kwargs['zone']),
self.check("zones", None)
])
# add a second frontend ip configuration
self.cmd('network lb frontend-ip create -g {rg} --lb-name {lb} -n LoadBalancerFrontEnd2 -z {zone} --vnet-name vnet1 --subnet subnet1', checks=[
self.check("zones", [self.kwargs['zone']])
])
# test for private-ip-address-version
self.cmd('network lb create -g {rg} -n {lb2} -l westcentralus --sku Standard')
self.cmd('network public-ip create -n {ip2} -g {rg} -l westcentralus --sku Standard --allocation-method Static --version IPv6')
self.cmd('network lb frontend-ip create --lb-name {lb2} -n ipv6 -g {rg} --private-ip-address-version IPv6 --public-ip-address {ip2}', checks=[
self.check('name', 'ipv6'),
self.check('privateIpAddressVersion', 'IPv6'),
self.check('provisioningState', 'Succeeded')
])
self.cmd('network lb create -g {rg} -n {lb3} --sku Standard -l westcentralus --private-ip-address-version IPv6', checks=[
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAddressVersion', 'IPv6')
])
class NetworkPublicIpWithSku(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_lb_sku')
def test_network_public_ip_sku(self, resource_group):
self.kwargs.update({
'standard_sku': 'Standard',
'basic_sku': 'Basic',
'regional_tier': 'Regional',
'global_tier': 'Global',
'location': 'eastus2',
'ip1': 'pubip1',
'ip2': 'pubip2',
'ip3': 'pubip3',
'ip4': 'pubip4'
})
self.cmd('network public-ip create -g {rg} -l {location} -n {ip1}')
self.cmd('network public-ip show -g {rg} -n {ip1}', checks=[
self.check('sku.name', self.kwargs.get('basic_sku')),
self.check('sku.tier', self.kwargs.get('regional_tier')),
self.check('publicIpAllocationMethod', 'Dynamic')
])
self.cmd('network public-ip create -g {rg} -l {location} -n {ip2} --sku {standard_sku} --tags foo=doo')
self.cmd('network public-ip show -g {rg} -n {ip2}', checks=[
self.check('sku.name', self.kwargs.get('standard_sku')),
self.check('sku.tier', self.kwargs.get('regional_tier')),
self.check('publicIpAllocationMethod', 'Static'),
self.check('tags.foo', 'doo')
])
self.cmd('network public-ip create -g {rg} -l {location} -n {ip3} --sku {standard_sku} --tier {global_tier}')
self.cmd('network public-ip show -g {rg} -n {ip3}', checks=[
self.check('sku.name', self.kwargs.get('standard_sku')),
self.check('sku.tier', self.kwargs.get('global_tier')),
self.check('publicIpAllocationMethod', 'Static')
])
from azure.core.exceptions import HttpResponseError
with self.assertRaisesRegexp(HttpResponseError, 'Global publicIP addresses are only supported for standard SKU public IP addresses'):
self.cmd('network public-ip create -g {rg} -l {location} -n {ip4} --tier {global_tier}')
class NetworkPublicIpPrefix(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_network_public_ip_prefix', location='eastus2')
def test_network_public_ip_prefix(self, resource_group):
self.kwargs.update({
'prefix': 'prefix1',
'pip': 'pip1'
})
# Test prefix CRUD
self.cmd('network public-ip prefix create -g {rg} -n {prefix} --length 30',
checks=self.check('prefixLength', 30))
self.cmd('network public-ip prefix update -g {rg} -n {prefix} --tags foo=doo')
self.cmd('network public-ip prefix list -g {rg}',
checks=self.check('length(@)', 1))
self.cmd('network public-ip prefix delete -g {rg} -n {prefix}')
self.cmd('network public-ip prefix list -g {rg}',
checks=self.is_empty())
# Test public IP create with prefix
self.cmd('network public-ip prefix create -g {rg} -n {prefix} --length 30')
self.cmd('network public-ip create -g {rg} -n {pip} --public-ip-prefix {prefix} --sku Standard',
checks=self.check("publicIp.publicIpPrefix.id.contains(@, '{prefix}')", True))
# Test IP address version
self.kwargs.update({
'prefix_name_ipv4': 'public_ip_prefix_0',
'prefix_name_ipv5': 'public_ip_prefix_1',
'prefix_name_ipv6': 'public_ip_prefix_2'
})
# Check the default ip address version value
self.cmd('network public-ip prefix create -g {rg} -n {prefix_name_ipv4} --length 30', checks=[
self.check('publicIpAddressVersion', 'IPv4')
])
# Check the creation of public IP prefix with IPv6 address option
# Note: the prefix length for IPv6 ranges from a minimum of 124 to a maximum of 127
self.cmd('network public-ip prefix create -g {rg} -n {prefix_name_ipv6} --length 127 --version IPv6', checks=[
self.check('publicIpAddressVersion', 'IPv6')
])
# Check with unsupported IP address version: IPv5
with self.assertRaisesRegexp(SystemExit, '2'):
self.cmd('network public-ip prefix create -g {rg} -n {prefix_name_ipv6} --length 127 --version IPv5')
class NetworkMultiIdsShowScenarioTest(ScenarioTest):
@live_only()
@ResourceGroupPreparer(name_prefix='test_multi_id')
def test_network_multi_id_show(self, resource_group):
self.cmd('network public-ip create -g {rg} -n pip1')
self.cmd('network public-ip create -g {rg} -n pip2')
pip1 = self.cmd('network public-ip show -g {rg} -n pip1').get_output_in_json()
pip2 = self.cmd('network public-ip show -g {rg} -n pip2').get_output_in_json()
self.cmd('network public-ip show --ids {} {}'.format(pip1['id'], pip2['id']),
checks=self.check('length(@)', 2))
class NetworkUsageListScenarioTest(ScenarioTest):
def test_network_usage_list(self):
self.cmd('network list-usages --location westus', checks=self.check('type(@)', 'array'))
class NetworkAppGatewayDefaultScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_basic')
def test_network_app_gateway_with_defaults(self, resource_group):
self.cmd('network application-gateway create -g {rg} -n ag1 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n ag1 --exists')
self.cmd('network application-gateway update -g {rg} -n ag1 --no-wait')
self.cmd('network application-gateway update -g {rg} -n ag1 --no-wait --capacity 3 --sku standard_small --tags foo=doo')
self.cmd('network application-gateway wait -g {rg} -n ag1 --updated')
ag_list = self.cmd('network application-gateway list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?resourceGroup == '{}']) == length(@)".format(resource_group), True)
]).get_output_in_json()
ag_count = len(ag_list)
self.cmd('network application-gateway show --resource-group {rg} --name ag1', checks=[
self.check('type(@)', 'object'),
self.check('name', 'ag1'),
self.check('resourceGroup', resource_group),
self.check('frontendIpConfigurations[0].privateIpAllocationMethod', 'Dynamic'),
self.check("frontendIpConfigurations[0].subnet.contains(id, 'default')", True)
])
self.cmd('network application-gateway show-backend-health -g {rg} -n ag1')
self.cmd('network application-gateway stop --resource-group {rg} -n ag1')
self.cmd('network application-gateway start --resource-group {rg} -n ag1')
self.cmd('network application-gateway delete --resource-group {rg} -n ag1')
self.cmd('network application-gateway list --resource-group {rg}', checks=self.check('length(@)', ag_count - 1))
@ResourceGroupPreparer(name_prefix='cli_test_ag_basic_with_waf_v2_sku')
def test_network_app_gateway_with_waf_v2_sku(self, resource_group):
self.cmd('network application-gateway create -g {rg} -n ag1 --sku WAF_v2 --public-ip-address pubip1 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n ag1 --exists')
self.cmd('network application-gateway list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?resourceGroup == '{}']) == length(@)".format(resource_group), True)
])
self.cmd('network application-gateway show --resource-group {rg} --name ag1', checks=[
self.check('type(@)', 'object'),
self.check('name', 'ag1'),
self.check('resourceGroup', resource_group),
self.check('frontendIpConfigurations[0].privateIpAllocationMethod', 'Dynamic')
])
@ResourceGroupPreparer(name_prefix='test_network_appgw_creation_with_public_and_private_ip')
def test_network_appgw_creation_with_public_and_private_ip(self, resource_group):
self.kwargs.update({
"appgw": "applicationGateway",
"ip": "publicIP",
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd("network application-gateway create -g {rg} -n {appgw} "
"--sku Standard_v2 "
"--enable-private-link "
"--private-ip-address 10.0.0.17 "
"--public-ip-address {ip}")
show_data = self.cmd("network application-gateway show -g {rg} -n {appgw}").get_output_in_json()
self.assertEqual(len(show_data["frontendIpConfigurations"]), 2)
# These assertions are not stable because the order of the frontendIpConfigurations array is not fixed
# self.assertTrue(show_data["frontendIpConfigurations"][0]["publicIpAddress"]["id"].endswith(self.kwargs["ip"]))
# self.assertTrue(show_data["frontendIpConfigurations"][1]["id"].endswith("appGatewayPrivateFrontendIP")) # default name
# self.assertEqual(show_data["frontendIpConfigurations"][1]["privateIpAddress"], "10.0.0.17")
# self.assertEqual(show_data["frontendIpConfigurations"][1]["privateLinkConfiguration"], None)
# service is buggy; these properties won't be populated this quickly even in Azure CLI 2.12.0
# self.assertTrue(show_data["frontendIpConfigurations"][1]["privateLinkConfiguration"]["id"].endswith("PrivateLinkDefaultConfiguration"))
self.cmd("network application-gateway delete -g {rg} -n {appgw}")
class NetworkAppGatewayIdentityScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_identity')
def test_network_app_gateway_with_identity(self, resource_group):
self.kwargs.update({
'rg': resource_group,
'gw': 'gateway',
'one_off_identity': 'id1',
'access_identity': 'id2',
'ip': 'ip1',
'kv': self.create_random_name('cli-test-keyvault-', 24),
'cert': 'MyCertificate'
})
# create a managed identity
self.cmd('identity create -g {rg} -n {one_off_identity}').get_output_in_json()
access_identity_result = self.cmd('identity create -g {rg} -n {access_identity}').get_output_in_json()
self.kwargs.update({
'access_identity_principal': access_identity_result['principalId']
})
self.cmd('keyvault create -g {rg} -n {kv} --sku premium')
self.cmd('keyvault set-policy -g {rg} -n {kv} '
'--object-id {access_identity_principal} --secret-permissions get list set')
self.cmd('keyvault update -n {kv} --enable-soft-delete -g {rg}')
# create a certificate
keyvault_cert_policy = self.cmd('az keyvault certificate get-default-policy').get_output_in_json()
self.kwargs.update({
'keyvault_cert_policy': keyvault_cert_policy
})
self.cmd('keyvault certificate create '
'--vault-name {kv} '
'--name {cert} '
'--policy "{keyvault_cert_policy}"')
cert_result = self.cmd('keyvault certificate show --vault-name {kv} --name {cert}').get_output_in_json()
self.kwargs.update({
'secret_id': cert_result['sid']
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
# create application-gateway with one_off_identity
self.cmd('network application-gateway create '
'-g {rg} -n {gw} '
'--sku Standard_v2 --public-ip-address {ip} '
'--identity {one_off_identity} ')
self.cmd('network application-gateway show -g {rg} -n {gw}', checks=[
self.check('identity.type', 'userAssigned')
])
# remove one_off_identity
self.cmd('network application-gateway identity remove -g {rg} --gateway-name {gw}', checks=[
self.check('identity', None)
])
# assign access_identity
self.cmd('network application-gateway identity assign '
'-g {rg} --gateway-name {gw} --identity {access_identity}',
checks=[
self.check('identity.type', 'userAssigned')
])
self.cmd('network application-gateway identity show -g {rg} --gateway-name {gw}', checks=[
self.check('type', 'userAssigned')
])
self.cmd('network application-gateway ssl-cert create '
' -g {rg} --gateway-name {gw} '
'--name MySSLCert '
'--key-vault-secret-id {secret_id}')
self.cmd('network application-gateway root-cert create -g {rg} --gateway-name {gw} -n cert1 --keyvault-secret {secret_id}', checks=[
self.check('trustedRootCertificates[0].keyVaultSecretId', '{secret_id}')
])
class NetworkAppGatewayTrustedClientCertScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_trusted_client_cert')
def test_network_app_gateway_with_trusted_client_cert(self, resource_group):
self.kwargs.update({
'rg': resource_group,
'gw': 'gateway',
'ip': 'ip1',
'cert': os.path.join(TEST_DIR, 'client.cer'),
'cert1': os.path.join(TEST_DIR, 'client1.cer'),
'cname': 'cert_name',
'cname1': 'cert_name1',
})
# create an ag with trusted client cert
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {gw} --sku Standard_v2 --public-ip-address {ip} '
'--trusted-client-cert name={cname} data="{cert}"',
checks=[self.check('length(applicationGateway.trustedClientCertificates)', 1)])
self.cmd('network application-gateway client-cert add -g {rg} --gateway-name {gw} '
'--name {cname1} --data "{cert1}"',
checks=[self.check('length(trustedClientCertificates)', 2)])
self.cmd('network application-gateway client-cert list -g {rg} --gateway-name {gw}',
checks=[self.check('length(@)', 2)])
self.cmd('network application-gateway client-cert remove -g {rg} --gateway-name {gw} --name {cname1}',
checks=[self.check('length(trustedClientCertificates)', 1)])


class NetworkAppGatewaySslProfileScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_ssl_profile')
def test_network_app_gateway_with_ssl_profile(self, resource_group):
self.kwargs.update({
'rg': resource_group,
'gw': 'gateway',
'ip': 'ip1',
'name': 'name',
'name1': 'name1',
})
# create an ag with ssl profile
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {gw} --sku Standard_v2 --public-ip-address {ip} '
'--ssl-profile name={name} client-auth-configuration=True min-protocol-version=TLSv1_0 '
'cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 policy-type=Custom',
checks=[self.check('length(applicationGateway.sslProfiles)', 1)])
self.cmd('network application-gateway ssl-profile add -g {rg} --gateway-name {gw} --name {name1} '
'--client-auth-configuration True --min-protocol-version TLSv1_0 '
'--cipher-suites TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 --policy-type Custom',
checks=[self.check('length(sslProfiles)', 2)])
self.cmd('network application-gateway ssl-profile list -g {rg} --gateway-name {gw}',
checks=[self.check('length(@)', 2)])
self.cmd('network application-gateway ssl-profile remove -g {rg} --gateway-name {gw} --name {name} ',
checks=[self.check('length(sslProfiles)', 1)])


class NetworkAppGatewayZoneScenario(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_zone', location='westus2')
def test_network_ag_zone(self, resource_group):
self.kwargs.update({
'gateway': 'ag1',
'ip': 'pubip1'
})
        # For public IPs created with API version 2020-08-01 or later, passing '-z 1 3' actually returns 'zones': ['1', '2', '3'].
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard -z 1 3', checks=[
self.check('length(publicIp.zones)', 3)
])
self.cmd('network application-gateway create -g {rg} -n {gateway} --sku Standard_v2 --min-capacity 2 --max-capacity 4 --zones 1 3 --public-ip-address {ip} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {gateway} --exists')
self.cmd('network application-gateway show -g {rg} -n {gateway}', checks=[
self.check('zones[0]', 1)
])


class NetworkAppGatewayAuthCertScenario(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_auth_cert')
def test_network_ag_auth_cert(self, resource_group):
self.kwargs.update({
'gateway': 'ag1',
'cert1': 'cert1',
'cert1_file': os.path.join(TEST_DIR, 'AuthCert.pfx'),
'cert2': 'cert2',
'cert2_file': os.path.join(TEST_DIR, 'AuthCert2.pfx'),
'settings': 'https_settings'
})
self.cmd('network application-gateway create -g {rg} -n {gateway} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {gateway} --exists')
self.cmd('network application-gateway auth-cert create -g {rg} --gateway-name {gateway} -n {cert1} --cert-file "{cert1_file}" --no-wait')
self.cmd('network application-gateway auth-cert create -g {rg} --gateway-name {gateway} -n {cert2} --cert-file "{cert2_file}" --no-wait')
self.cmd('network application-gateway http-settings create -g {rg} --gateway-name {gateway} -n {settings} --auth-certs {cert1} {cert2} --no-wait --port 443 --protocol https')
self.cmd('network application-gateway http-settings update -g {rg} --gateway-name {gateway} -n {settings} --auth-certs {cert2} {cert1} --no-wait')
self.cmd('network application-gateway show -g {rg} -n {gateway}',
checks=self.check('length(backendHttpSettingsCollection[1].authenticationCertificates)', 2))


class NetworkAppGatewayTrustedRootCertScenario(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_root_cert')
def test_network_ag_root_cert(self, resource_group):
self.kwargs.update({
'gateway': 'ag1',
'cert1': 'cert1',
'cert1_file': os.path.join(TEST_DIR, 'test-root-cert.cer'),
'cert2': 'cert2',
'cert2_file': os.path.join(TEST_DIR, 'test-root-cert-2.cer'),
'settings': 'https_settings',
'ip1': 'myip1'
})
self.cmd('network public-ip create -g {rg} -n {ip1} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {gateway} --sku Standard_v2 --public-ip-address {ip1}')
self.cmd('network application-gateway wait -g {rg} -n {gateway} --exists')
self.cmd('network application-gateway root-cert create -g {rg} --gateway-name {gateway} -n {cert1} --cert-file "{cert1_file}"')
self.cmd('network application-gateway root-cert create -g {rg} --gateway-name {gateway} -n {cert2} --cert-file "{cert2_file}"')
self.cmd('network application-gateway http-settings create -g {rg} --gateway-name {gateway} -n {settings} --root-certs {cert1} {cert2} --host-name-from-backend-pool true --no-wait --port 443 --protocol https')
self.cmd('network application-gateway http-settings update -g {rg} --gateway-name {gateway} -n {settings} --root-certs {cert2} {cert1} --no-wait')
self.cmd('network application-gateway show -g {rg} -n {gateway}',
checks=self.check('length(backendHttpSettingsCollection[1].trustedRootCertificates)', 2))
self.cmd('network application-gateway http-settings update -g {rg} --gateway-name {gateway} -n {settings} --no-wait')


class NetworkAppGatewayRedirectConfigScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_basic')
def test_network_app_gateway_redirect_config(self, resource_group):
self.kwargs.update({
'gateway': 'ag1',
'name': 'redirect1'
})
self.cmd('network application-gateway create -g {rg} -n {gateway} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {gateway} --exists')
self.cmd('network application-gateway redirect-config create --gateway-name {gateway} -g {rg} -n {name} -t permanent --include-query-string --include-path false --target-listener appGatewayHttpListener --no-wait')
self.cmd('network application-gateway redirect-config show --gateway-name {gateway} -g {rg} -n {name}', checks=[
self.check('includePath', False),
self.check('includeQueryString', True),
self.check('redirectType', 'Permanent')
])
self.cmd('network application-gateway redirect-config update --gateway-name {gateway} -g {rg} -n {name} --include-path --include-query-string false --no-wait')
self.cmd('network application-gateway redirect-config show --gateway-name {gateway} -g {rg} -n {name}', checks=[
self.check('includePath', True),
self.check('includeQueryString', False),
self.check('redirectType', 'Permanent')
])
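

# Hedged sketch of the redirect semantics asserted above: a 'Permanent'
# redirect answers HTTP 301, and includePath/includeQueryString control which
# parts of the original request are carried into the Location header
# (illustration only, not the gateway's implementation).
def _sketch_redirect_location(target, path='', query='', include_path=False, include_query=False):
    location = target + (path if include_path else '')
    if include_query and query:
        location += '?' + query
    return location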


class NetworkAppGatewayExistingSubnetScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_existing_subnet')
def test_network_app_gateway_with_existing_subnet(self, resource_group):
vnet = self.cmd('network vnet create -g {rg} -n vnet2 --subnet-name subnet1').get_output_in_json()
subnet_id = vnet['newVNet']['subnets'][0]['id']
self.kwargs['subnet_id'] = subnet_id
        # providing --subnet-address-prefix for an existing subnet ID should fail
self.cmd('network application-gateway create -g {rg} -n ag2 --subnet {subnet_id} --subnet-address-prefix 10.0.0.0/28 --tags foo=doo', expect_failure=True)
# now verify it succeeds
self.cmd('network application-gateway create -g {rg} -n ag2 --subnet {subnet_id} --servers 172.0.0.1 www.mydomain.com', checks=[
self.check('applicationGateway.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Dynamic'),
self.check('applicationGateway.frontendIPConfigurations[0].properties.subnet.id', subnet_id)
])


class NetworkAppGatewayNoWaitScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_no_wait')
def test_network_app_gateway_no_wait(self, resource_group):
self.kwargs.update({
            'tags': {'a': 'b', 'c': 'd'}
})
self.cmd('network application-gateway create -g {rg} -n ag1 --no-wait --connection-draining-timeout 180', checks=self.is_empty())
self.cmd('network application-gateway create -g {rg} -n ag2 --no-wait --tags a=b c=d', checks=self.is_empty())
self.cmd('network application-gateway wait -g {rg} -n ag1 --created --interval 120', checks=self.is_empty())
self.cmd('network application-gateway wait -g {rg} -n ag2 --created --interval 120', checks=self.is_empty())
self.cmd('network application-gateway show -g {rg} -n ag1', checks=[
self.check('provisioningState', 'Succeeded'),
self.check('backendHttpSettingsCollection[0].connectionDraining.enabled', True),
self.check('backendHttpSettingsCollection[0].connectionDraining.drainTimeoutInSec', 180)
])
self.cmd('network application-gateway show -g {rg} -n ag2', checks=[
self.check('provisioningState', 'Succeeded'),
self.check('tags', '{tags}')
])
self.cmd('network application-gateway delete -g {rg} -n ag2 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n ag2 --deleted')
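

# Illustrative sketch of how the 'a=b c=d' arguments above map onto the tags
# dict checked against '{tags}'; the real parsing lives in the CLI's tag
# validators, this is only an approximation.
def _sketch_parse_tags(tag_args):
    return dict(pair.split('=', 1) for pair in tag_args)


assert _sketch_parse_tags(['a=b', 'c=d']) == {'a': 'b', 'c': 'd'}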


class NetworkAppGatewayPrivateIpScenarioTest20170601(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_private_ip')
def test_network_app_gateway_with_private_ip(self, resource_group):
self.kwargs.update({
'private_ip': '10.0.0.15',
'path': os.path.join(TEST_DIR, 'TestCert.pfx'),
'pass': 'password'
})
self.cmd('network application-gateway create -g {rg} -n ag3 --subnet subnet1 --private-ip-address {private_ip} --cert-file "{path}" --cert-password {pass} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n ag3 --exists')
self.cmd('network application-gateway show -g {rg} -n ag3', checks=[
self.check('frontendIpConfigurations[0].privateIpAddress', '{private_ip}'),
self.check('frontendIpConfigurations[0].privateIpAllocationMethod', 'Static')
])
self.kwargs['path'] = os.path.join(TEST_DIR, 'TestCert2.pfx')
self.cmd('network application-gateway ssl-cert update -g {rg} --gateway-name ag3 -n ag3SslCert --cert-file "{path}" --cert-password {pass}')
self.cmd('network application-gateway wait -g {rg} -n ag3 --updated')
self.cmd('network application-gateway ssl-policy set -g {rg} --gateway-name ag3 --disabled-ssl-protocols TLSv1_0 TLSv1_1 --no-wait')
self.cmd('network application-gateway ssl-policy show -g {rg} --gateway-name ag3',
checks=self.check('disabledSslProtocols.length(@)', 2))
cipher_suite = 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256'
self.kwargs['cipher'] = cipher_suite
self.cmd('network application-gateway ssl-policy set -g {rg} --gateway-name ag3 --min-protocol-version TLSv1_0 --cipher-suites {cipher} --no-wait')
self.cmd('network application-gateway ssl-policy show -g {rg} --gateway-name ag3', checks=[
self.check('cipherSuites.length(@)', 1),
self.check('minProtocolVersion', 'TLSv1_0'),
self.check('policyType', 'Custom')
])
policy_name = 'AppGwSslPolicy20150501'
self.kwargs['policy'] = policy_name
self.cmd('network application-gateway ssl-policy set -g {rg} --gateway-name ag3 -n {policy} --no-wait')
self.cmd('network application-gateway ssl-policy show -g {rg} --gateway-name ag3', checks=[
self.check('policyName', policy_name),
self.check('policyType', 'Predefined')
])


class NetworkAppGatewaySubresourceScenarioTest(ScenarioTest):
def _create_ag(self):
self.cmd('network application-gateway create -g {rg} -n {ag} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')

    @ResourceGroupPreparer(name_prefix='cli_test_ag_address_pool')
def test_network_ag_address_pool(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway address-pool',
'name': 'pool1'
})
self._create_ag()
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --servers 123.4.5.6 www.mydns.com')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('length(backendAddresses)', 2),
self.check('backendAddresses[0].ipAddress', '123.4.5.6'),
self.check('backendAddresses[1].fqdn', 'www.mydns.com'),
])
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --servers 5.4.3.2')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('length(backendAddresses)', 1),
self.check('backendAddresses[0].ipAddress', '5.4.3.2')
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))
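
    # Illustrative sketch (not used by the tests): self.cmd() expands the
    # '{...}' placeholders from self.kwargs before execution, roughly like
    # str.format; a minimal stand-in, assuming plain string commands:
    @staticmethod
    def _sketch_expand_command(command, kwargs):
        # 'network {res} show -n {name}' -> 'network application-gateway address-pool show -n pool1'
        return command.format(**kwargs)
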
@ResourceGroupPreparer(name_prefix='cli_test_ag_frontend_port')
def test_network_ag_frontend_port(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway frontend-port',
'name': 'myport'
})
self._create_ag()
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --port 111')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('name', 'myport'),
self.check('port', 111)
])
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --port 112')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('name', 'myport'),
self.check('port', 112)
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))

    @ResourceGroupPreparer(name_prefix='cli_test_ag_frontend_ip_public')
def test_network_ag_frontend_ip_public(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway frontend-ip',
'name': 'myfrontend',
'ip1': 'myip1',
'ip2': 'myip2'
})
self.cmd('network application-gateway create -g {rg} -n {ag} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
self.cmd('network public-ip create -g {rg} -n {ip1}')
self.cmd('network public-ip create -g {rg} -n {ip2}')
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --public-ip-address {ip1}')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('subnet', None)
])
# NOTE: Service states that public IP address cannot be changed. https://github.com/Azure/azure-cli/issues/4133
# self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --public-ip-address {ip2}')
# self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))

    @ResourceGroupPreparer(name_prefix='cli_test_ag_frontend_ip_private')
def test_network_ag_frontend_ip_private(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway frontend-ip',
'name': 'frontendip',
'ip1': 'myip1',
'vnet1': 'vnet1',
'vnet2': 'vnet2',
'subnet': 'subnet1'
})
self.cmd('network public-ip create -g {rg} -n {ip1}')
self.cmd('network vnet create -g {rg} -n {vnet1} --subnet-name {subnet}')
self.cmd('network application-gateway create -g {rg} -n {ag} --no-wait --public-ip-address {ip1} --vnet-name {vnet1} --subnet {subnet}')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --private-ip-address 10.0.0.10 --vnet-name {vnet1} --subnet {subnet}')
        self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}')
# NOTE: Service states that frontend subnet cannot differ from gateway subnet https://github.com/Azure/azure-cli/issues/4134
# self.cmd('network vnet create -g {rg} -n {vnet2} --subnet-name {subnet} --address-prefix 10.0.0.0/16 --subnet-prefix 10.0.10.0/24')
# self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --private-ip-address 11.0.10.10 --vnet-name {vnet2} --subnet {subnet}')
# self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))

    @ResourceGroupPreparer(name_prefix='cli_test_ag_http_listener')
def test_network_ag_http_listener(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway http-listener',
'name': 'mylistener'
})
self._create_ag()
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --frontend-port appGatewayFrontendPort --host-name www.test.com')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('hostName', 'www.test.com')
])
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --host-name www.test2.com')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('hostName', 'www.test2.com')
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))

    @ResourceGroupPreparer(name_prefix='test_network_ag_http_listener_with_waf_policy')
def test_network_ag_http_listener_with_waf_policy(self):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway http-listener',
'name': 'mylistener',
'waf_1': 'waf_1',
'waf_2': 'waf_2',
'waf_3': 'waf_3',
'listener_2': 'listener_2',
})
        # prepare three WAF policies
self.cmd('network application-gateway waf-policy create -g {rg} --name {waf_1}')
self.cmd('network application-gateway waf-policy create -g {rg} --name {waf_2}')
self.cmd('network application-gateway waf-policy create -g {rg} --name {waf_3}')
self.cmd('network public-ip create -g {rg} -n ip-1 --sku Standard')
# sku=WAF_v2 is necessary for updating HTTP listener's WAF configuration
create_res = self.cmd('network application-gateway create -g {rg} --name {ag} --public-ip-address ip-1 --sku WAF_v2').get_output_in_json()
self.assertEqual(len(create_res['applicationGateway']['httpListeners']), 1)
self.assertIsNone(create_res['applicationGateway']['httpListeners'][0].get('firewallPolicy'))
# update the default HTTP listener's WAF policy
update_res = self.cmd('network application-gateway http-listener update -g {rg} --gateway-name {ag} --name appGatewayHttpListener --waf-policy {waf_1}').get_output_in_json()
self.assertTrue(update_res['firewallPolicy']['id'].endswith('waf_1'))
# create another HTTP listener with WAF policy and update it to another WAF policy
self.cmd('network application-gateway frontend-port create -g {rg} --gateway-name {ag} --port 9020 --name 9020')
self.cmd('network application-gateway http-listener create -g {rg} --gateway-name {ag} --name {listener_2} --frontend-port 9020 --waf-policy {waf_2}')
update_res = self.cmd('network application-gateway http-listener update -g {rg} --gateway-name {ag} --name {listener_2} --waf-policy {waf_3}').get_output_in_json()
self.assertTrue(update_res['firewallPolicy']['id'].endswith('waf_3'))
self.cmd('network application-gateway show -g {rg} --name {ag}')

    @ResourceGroupPreparer(name_prefix='cli_test_ag_http_listener')
def test_network_ag_http_listener_with_multi_host_names(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway http-listener',
'name': 'mylistener',
'gateway_ip': 'ip1',
'port': 'cliport'
})
self.cmd('network public-ip create -g {rg} -n {gateway_ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {ag} '
'--sku WAF_v2 '
'--public-ip-address {gateway_ip} ')
self.cmd('network application-gateway frontend-port create -g {rg} --gateway-name {ag} -n {port} --port 18080')
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --frontend-port {port} --host-names "*.contoso.com" "www.microsoft.com"')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('length(hostNames)', 2),
self.check('hostNames[0]', "*.contoso.com"),
self.check('hostNames[1]', "www.microsoft.com")
])
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --host-names "*.contoso.com" "www.bing.com"')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('length(hostNames)', 2),
self.check('hostNames[0]', "*.contoso.com"),
self.check('hostNames[1]', "www.bing.com")
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))
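
    # Hedged sketch of the wildcard host-name semantics exercised above: the
    # actual matching happens service-side; fnmatch merely illustrates that
    # '*.contoso.com' covers subdomains but not the bare apex domain.
    @staticmethod
    def _sketch_wildcard_host_match(host, pattern):
        from fnmatch import fnmatch
        # _sketch_wildcard_host_match('portal.contoso.com', '*.contoso.com') -> True
        # _sketch_wildcard_host_match('contoso.com', '*.contoso.com') -> False
        return fnmatch(host, pattern)
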
@ResourceGroupPreparer(name_prefix='cli_test_ag_http_settings')
def test_network_ag_http_settings(self, resource_group):
self.kwargs.update({
'ag': 'ag1',
'res': 'application-gateway http-settings',
'name': 'mysettings'
})
self._create_ag()
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --affinity-cookie-name mycookie --connection-draining-timeout 60 --cookie-based-affinity --host-name-from-backend-pool --protocol https --timeout 50 --port 70')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('affinityCookieName', 'mycookie'),
self.check('connectionDraining.drainTimeoutInSec', 60),
self.check('connectionDraining.enabled', True),
self.check('cookieBasedAffinity', 'Enabled'),
self.check('pickHostNameFromBackendAddress', True),
self.check('port', 70),
self.check('protocol', 'Https'),
self.check('requestTimeout', 50)
])
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --affinity-cookie-name mycookie2 --connection-draining-timeout 0 --cookie-based-affinity disabled --host-name-from-backend-pool false --protocol http --timeout 40 --port 71')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('affinityCookieName', 'mycookie2'),
self.check('connectionDraining.drainTimeoutInSec', 1),
self.check('connectionDraining.enabled', False),
self.check('cookieBasedAffinity', 'Disabled'),
self.check('pickHostNameFromBackendAddress', False),
self.check('port', 71),
self.check('protocol', 'Http'),
self.check('requestTimeout', 40)
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 2))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))
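
    # Sketch of the connection-draining mapping the checks above imply: a
    # timeout of 0 disables draining, and the service reports a floor of one
    # second (an assumption read off the assertions, not documented behavior).
    @staticmethod
    def _sketch_connection_draining(timeout_sec):
        return {'enabled': timeout_sec > 0, 'drainTimeoutInSec': max(timeout_sec, 1)}
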
@ResourceGroupPreparer(name_prefix='cli_test_ag_probe')
def test_network_ag_probe(self, resource_group):
self.kwargs.update({
'rg': resource_group,
'gateway_ip': 'gateway_ip',
'ag': 'ag1',
'res': 'application-gateway probe',
'name': 'myprobe'
})
self.cmd('network public-ip create -g {rg} -n {gateway_ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {ag} '
'--sku WAF_v2 '
'--public-ip-address {gateway_ip} ')
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait '
'--path /test '
'--protocol http '
'--interval 25 '
'--timeout 100 '
'--threshold 10 '
'--min-servers 2 '
'--host www.test.com '
'--match-status-codes 200 204 '
'--host-name-from-http-settings false '
'--port 2048 ')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('path', '/test'),
self.check('protocol', 'Http'),
self.check('interval', 25),
self.check('timeout', 100),
self.check('unhealthyThreshold', 10),
self.check('minServers', 2),
self.check('host', 'www.test.com'),
self.check('length(match.statusCodes)', 2),
self.check('pickHostNameFromBackendHttpSettings', False),
self.check('port', 2048)
])
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait '
'--path /test2 '
'--protocol https '
'--interval 26 '
'--timeout 101 '
'--threshold 11 '
'--min-servers 3 '
'--host "" '
'--match-status-codes 201 '
'--host-name-from-http-settings '
'--port 4096 ')
self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}', checks=[
self.check('path', '/test2'),
self.check('protocol', 'Https'),
self.check('interval', 26),
self.check('timeout', 101),
self.check('unhealthyThreshold', 11),
self.check('minServers', 3),
self.check('host', ''),
self.check('length(match.statusCodes)', 1),
self.check('pickHostNameFromBackendHttpSettings', True),
self.check('port', 4096)
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 1))
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=self.check('length(@)', 0))
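
    # Illustrative sketch of evaluating a backend response code against the
    # --match-status-codes values above; the service also accepts ranges such
    # as '200-399', handled here for completeness (hypothetical helper).
    @staticmethod
    def _sketch_probe_status_ok(code, patterns):
        for pattern in patterns:
            low, _, high = pattern.partition('-')
            if int(low) <= code <= int(high or low):
                return True
        return False
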
@ResourceGroupPreparer(name_prefix='cli_test_ag_rule')
def test_network_ag_rule(self, resource_group):
self.kwargs.update({
'ip': 'pip1',
'ag': 'ag1',
'res': 'application-gateway rule',
'name': 'myrule',
'name2': 'myrule2',
'set': 'myruleset'
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {ag} --public-ip-address {ip} --sku Standard_v2 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
        # Give the default rule a priority first.
        # Otherwise, the server raises ApplicationGatewayRequestRoutingRulePartialPriorityDefined.
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n rule1 --priority 1')
self.cmd('network application-gateway http-listener create -g {rg} --gateway-name {ag} -n mylistener --no-wait --frontend-port appGatewayFrontendPort --host-name www.test.com')
self.cmd('network application-gateway http-listener create -g {rg} --gateway-name {ag} -n mylistener2 --no-wait --frontend-port appGatewayFrontendPort --host-name www.test2.com')
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name} --no-wait --http-listener mylistener --priority 12')
rule = self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}').get_output_in_json()
self.assertTrue(rule['httpListener']['id'].endswith('mylistener'))
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name} --no-wait --http-listener mylistener2 --priority 32')
rule = self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name}').get_output_in_json()
self.assertTrue(rule['httpListener']['id'].endswith('mylistener2'))
self.cmd('network application-gateway rewrite-rule set create -g {rg} --gateway-name {ag} -n {set}')
self.cmd('network {res} create -g {rg} --gateway-name {ag} -n {name2} --no-wait --rewrite-rule-set {set} --http-listener mylistener --priority 10')
rule = self.cmd('network {res} show -g {rg} --gateway-name {ag} -n {name2}').get_output_in_json()
self.kwargs['set_id'] = rule['rewriteRuleSet']['id']
self.cmd('network {res} update -g {rg} --gateway-name {ag} -n {name2} --rewrite-rule-set {set_id} --priority 21', checks=[
self.check('rewriteRuleSet.id', '{set_id}')
])
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=[
self.check('length(@)', 3)
])
self.cmd('network {res} delete -g {rg} --gateway-name {ag} --no-wait -n {name}')
self.cmd('network {res} list -g {rg} --gateway-name {ag}', checks=[
self.check('length(@)', 2)
])

    @ResourceGroupPreparer(name_prefix='cli_test_ag_url_path_map')
def test_network_ag_url_path_map(self, resource_group):
self.kwargs.update({
'ip': 'pip1',
'ag': 'ag1',
'name': 'mypathmap',
'rulename': 'myurlrule',
'rulename2': 'myurlrule2',
'pool': 'mypool',
'set': 'myruleset',
'settings': 'http_settings',
'rg': resource_group
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {ag} --public-ip-address {ip} --sku Standard_v2 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
self.cmd('network application-gateway http-listener create -g {rg} --gateway-name {ag} -n mylistener --no-wait --frontend-port appGatewayFrontendPort --host-name www.test.com')
self.cmd('network application-gateway rewrite-rule set create -g {rg} --gateway-name {ag} -n {set}')
self.cmd('network application-gateway address-pool create -g {rg} --gateway-name {ag} -n {pool} --no-wait')
self.cmd('network application-gateway http-settings create -g {rg} --gateway-name {ag} -n {settings} --port 443 --protocol https')
self.cmd('network application-gateway url-path-map create -g {rg} --gateway-name {ag} -n {name} --rule-name {rulename} --paths /mypath1/* --address-pool {pool} '
'--default-address-pool {pool} --http-settings {settings} --default-http-settings {settings} '
'--default-rewrite-rule-set {set} --rewrite-rule-set {set}')
self.cmd('network application-gateway url-path-map update -g {rg} --gateway-name {ag} -n {name} --default-rewrite-rule-set {set}')
self.cmd('network application-gateway url-path-map rule create -g {rg} --gateway-name {ag} -n {rulename2} --path-map-name {name} '
'--paths /mypath122/* --address-pool {pool} --http-settings {settings} --rewrite-rule-set {set}')

    @ResourceGroupPreparer(name_prefix='cli_test_ag_url_path_map_edge_case')
def test_network_ag_url_path_map_edge_case(self, resource_group):
self.kwargs.update({
'ip': 'pip1',
'ag': 'ag1',
'name': 'mypathmap',
'rulename': 'myurlrule',
'rulename2': 'myurlrule2',
'pool': 'mypool',
'set': 'myruleset',
'settings': 'http_settings',
'redirect_config': 'myconfig',
'rg': resource_group
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd(
'network application-gateway create -g {rg} -n {ag} --public-ip-address {ip} --sku Standard_v2 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
self.cmd(
'network application-gateway http-listener create -g {rg} --gateway-name {ag} -n mylistener --no-wait --frontend-port appGatewayFrontendPort --host-name www.test.com')
self.cmd('network application-gateway rewrite-rule set create -g {rg} --gateway-name {ag} -n {set}')
self.cmd('network application-gateway redirect-config create -g {rg} --gateway-name {ag} -n {redirect_config} '
'--target-listener mylistener --type Permanent')
self.cmd('network application-gateway address-pool create -g {rg} --gateway-name {ag} -n {pool} --no-wait')
self.cmd(
'network application-gateway http-settings create -g {rg} --gateway-name {ag} -n {settings} --port 443 --protocol https')
self.cmd(
'network application-gateway url-path-map create -g {rg} --gateway-name {ag} -n {name} --rule-name {rulename} --paths /mypath1/* '
'--redirect-config {redirect_config} --default-redirect-config {redirect_config}')
self.cmd(
'network application-gateway url-path-map rule create -g {rg} --gateway-name {ag} -n {rulename2} --path-map-name {name} '
'--paths /mypath122/* --address-pool {pool} --http-settings {settings}')
        with self.assertRaisesRegex(CLIError, "Cannot reference a BackendAddressPool when Redirect Configuration is specified."):
self.cmd(
'network application-gateway url-path-map rule create -g {rg} --gateway-name {ag} -n {rulename2} --path-map-name {name} '
'--paths /mypath122/* --address-pool {pool} --http-settings {settings} --redirect-config {redirect_config}')
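

# Minimal sketch of the mutual-exclusion rule asserted above: a URL path-map
# rule may reference either a backend address pool or a redirect
# configuration, never both (hypothetical helper, not the CLI's validator).
def _sketch_validate_path_rule(address_pool=None, redirect_config=None):
    if address_pool and redirect_config:
        raise ValueError('Cannot reference a BackendAddressPool when Redirect Configuration is specified.')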


class NetworkAppGatewayRewriteRuleset(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_rewrite_rulesets')
def test_network_app_gateway_rewrite_rulesets(self, resource_group):
self.kwargs.update({
'gw': 'gw1',
'ip': 'pip1',
'set': 'ruleset1',
'rule': 'rule1',
'rule2': 'rule2',
'var': 'http_req_Authorization'
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku Standard')
self.cmd('network application-gateway create -g {rg} -n {gw} --public-ip-address {ip} --sku Standard_v2 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {gw} --exists')
# create ruleset
self.cmd('network application-gateway rewrite-rule set create -g {rg} --gateway-name {gw} -n {set} --no-wait')
self.cmd('network application-gateway rewrite-rule set show -g {rg} --gateway-name {gw} -n {set}')
# manage rewrite rules
self.cmd('network application-gateway rewrite-rule create -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule} --sequence 123 --request-headers foo=bar --response-headers cat=hat --no-wait')
self.cmd('network application-gateway rewrite-rule update -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule} --sequence 321 --request-headers bar=foo --response-headers hat=cat --no-wait')
self.cmd('network application-gateway rewrite-rule update -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule} --set ruleSequence=321 --remove actionSet.responseHeaderConfigurations 0 --no-wait')
self.cmd('network application-gateway rewrite-rule show -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule}')
self.cmd('network application-gateway rewrite-rule list -g {rg} --gateway-name {gw} --rule-set-name {set}')
self.cmd('network application-gateway rewrite-rule list-request-headers')
self.cmd('network application-gateway rewrite-rule list-response-headers')
# manage rewrite rules with url configuration
self.cmd('network application-gateway rewrite-rule create -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule2} '
'--sequence 123 --request-headers foo=bar --response-headers cat=hat '
'--modified-path "/def" --modified-query-string "a=b&c=d%20f"',
checks=[
self.check('actionSet.urlConfiguration.modifiedPath', '/def'),
self.check('actionSet.urlConfiguration.modifiedQueryString', 'a=b&c=d%20f'),
self.check('actionSet.urlConfiguration.reroute', False)
])
self.cmd('network application-gateway rewrite-rule update -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule2} '
'--sequence 321 --request-headers bar=foo --response-headers hat=cat '
'--modified-path "/def2" --modified-query-string "a=b&c=d%20f12" --enable-reroute',
checks=[
self.check('actionSet.urlConfiguration.modifiedPath', '/def2'),
self.check('actionSet.urlConfiguration.modifiedQueryString', 'a=b&c=d%20f12'),
self.check('actionSet.urlConfiguration.reroute', True)
])
self.cmd('network application-gateway rewrite-rule update -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule2} --set ruleSequence=321 --remove actionSet.responseHeaderConfigurations 0 --no-wait')
self.cmd('network application-gateway rewrite-rule show -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule2}')
# manage rewrite rule conditions
self.cmd('network application-gateway rewrite-rule condition create -g {rg} --gateway-name {gw} --rule-set-name {set} --rule-name {rule} --variable {var} --pattern "^Bearer" --ignore-case false --negate --no-wait')
self.cmd('network application-gateway rewrite-rule condition update -g {rg} --gateway-name {gw} --rule-set-name {set} --rule-name {rule} --variable {var} --pattern "^Bearers" --no-wait')
self.cmd('network application-gateway rewrite-rule condition show -g {rg} --gateway-name {gw} --rule-set-name {set} --rule-name {rule} --variable {var}')
self.cmd('network application-gateway rewrite-rule condition list -g {rg} --gateway-name {gw} --rule-set-name {set} --rule-name {rule}')
self.cmd('network application-gateway rewrite-rule condition delete -g {rg} --gateway-name {gw} --rule-set-name {set} --rule-name {rule} --variable {var} --no-wait')
self.cmd('network application-gateway rewrite-rule condition list -g {rg} --gateway-name {gw} --rule-set-name {set} --rule-name {rule}')
self.cmd('network application-gateway rewrite-rule condition list-server-variables')
self.cmd('network application-gateway rewrite-rule delete -g {rg} --gateway-name {gw} --rule-set-name {set} -n {rule} --no-wait')
self.cmd('network application-gateway rewrite-rule list -g {rg} --gateway-name {gw} --rule-set-name {set}')
self.cmd('network application-gateway rewrite-rule set delete -g {rg} --gateway-name {gw} -n {set} --no-wait')
self.cmd('network application-gateway rewrite-rule set list -g {rg} --gateway-name {gw}')
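

# Hedged sketch of the rewrite condition managed above: the gateway evaluates
# the pattern against the chosen server variable, with --ignore-case and
# --negate adjusting the outcome. An approximation using Python's re module:
def _sketch_condition_met(value, pattern='^Bearer', ignore_case=False, negate=True):
    import re
    flags = re.IGNORECASE if ignore_case else 0
    matched = re.search(pattern, value, flags) is not None
    return not matched if negate else matched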


class NetworkAppGatewayPublicIpScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ag_public_ip')
def test_network_app_gateway_with_public_ip(self, resource_group):
self.kwargs['ip'] = 'publicip4'
self.cmd('network application-gateway create -g {rg} -n test4 --subnet subnet1 --vnet-name vnet4 --vnet-address-prefix 10.0.0.1/16 --subnet-address-prefix 10.0.0.1/28 --public-ip-address {ip}', checks=[
self.check("applicationGateway.frontendIPConfigurations[0].properties.publicIPAddress.contains(id, '{ip}')", True),
self.check('applicationGateway.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Dynamic')
])


class NetworkAppGatewayWafConfigScenarioTest20170301(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_config')
def test_network_app_gateway_waf_config(self, resource_group):
self.kwargs.update({
'ip': 'pip1',
'ag': 'ag1'
})
self.cmd('network application-gateway create -g {rg} -n {ag} --subnet subnet1 --vnet-name vnet1 --public-ip-address {ip} --sku WAF_Medium --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
self.cmd('network application-gateway show -g {rg} -n {ag}', checks=[
self.check("frontendIpConfigurations[0].publicIpAddress.contains(id, '{ip}')", True),
self.check('frontendIpConfigurations[0].privateIpAllocationMethod', 'Dynamic')
])
self.cmd('network application-gateway waf-config set -g {rg} --gateway-name {ag} --enabled true --firewall-mode prevention --rule-set-version 2.2.9 --disabled-rule-groups crs_30_http_policy --disabled-rules 981175 981176 --no-wait')
self.cmd('network application-gateway waf-config show -g {rg} --gateway-name {ag}', checks=[
self.check('enabled', True),
self.check('firewallMode', 'Prevention'),
self.check('length(disabledRuleGroups)', 2),
self.check('length(disabledRuleGroups[1].rules)', 2)
])


class NetworkAppGatewayWafV2ConfigScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_v2_config')
def test_network_app_gateway_waf_v2_config(self, resource_group):
self.kwargs.update({
'ip': 'pip1',
'ag': 'ag1'
})
self.cmd('network public-ip create -g {rg} -n {ip} --sku standard')
self.cmd('network application-gateway create -g {rg} -n {ag} --subnet subnet1 --vnet-name vnet1 --public-ip-address {ip} --sku WAF_v2 --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists')
        self.cmd('network application-gateway waf-config set -g {rg} --gateway-name {ag} --enabled true --firewall-mode prevention --rule-set-version 3.0 --exclusion RequestHeaderNames StartsWith abc --exclusion RequestArgNames Equals def --no-wait')
        self.cmd('network application-gateway waf-config show -g {rg} --gateway-name {ag}', checks=[
self.check('enabled', True),
self.check('length(exclusions)', 2)
])


class NetworkAppGatewayWafPolicyScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_policy_')
def test_network_app_gateway_waf_policy_with_application_gateway(self, resource_group):
self.kwargs.update({
'waf': 'agp1',
'custom-rule1': 'rule1',
'custom-rule2': 'rule2',
'ip1': 'pip1',
'ag1': 'ag1',
'ip2': 'pip2',
'ag2': 'ag2',
'rg': resource_group,
'csr_grp1': 'REQUEST-921-PROTOCOL-ATTACK',
'csr_grp2': 'REQUEST-913-SCANNER-DETECTION'
})
# create a waf policy
self.cmd('network application-gateway waf-policy create -g {rg} -n {waf}')
self.cmd('network application-gateway waf-policy update -g {rg} -n {waf} --tags test=best',
checks=self.check('tags.test', 'best'))
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}')
self.cmd('network application-gateway waf-policy list -g {rg}',
checks=self.check('length(@)', 1))
        # add two custom rules to this waf-policy
self.cmd('network application-gateway waf-policy custom-rule create -g {rg} '
'--policy-name {waf} -n {custom-rule1} '
'--priority 50 --action log --rule-type MatchRule',
checks=self.check('priority', 50))
self.cmd('network application-gateway waf-policy custom-rule create -g {rg} '
'--policy-name {waf} -n {custom-rule2} '
'--priority 100 --action log --rule-type MatchRule')
# update some policy settings of this waf-policy
self.cmd('network application-gateway waf-policy policy-setting update -g {rg} --policy-name {waf} '
'--state Enabled --file-upload-limit-in-mb 64 --mode Prevention')
        # add two managed rule sets to the managed rules of this waf-policy
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0',
checks=[
self.check('managedRules.managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRules.managedRuleSets[0].ruleSetVersion', '3.0')
])
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0 '
'--group-name {csr_grp1} --rules 921100 921110')
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0 '
'--group-name {csr_grp2} --rules 913100')
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type Microsoft_BotManagerRuleSet --version 0.1',
checks=[
self.check('managedRules.managedRuleSets[1].ruleSetType', 'Microsoft_BotManagerRuleSet'),
self.check('managedRules.managedRuleSets[1].ruleSetVersion', '0.1')
])
# add one exclusion rule to the managed rules of this waf-policy
self.cmd('network application-gateway waf-policy managed-rule exclusion add -g {rg} --policy-name {waf} '
'--match-variable "RequestHeaderNames" --selector-match-operator "StartsWith" --selector "Bing"')
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}', checks=[
self.check('customRules | length(@)', 2),
self.check('customRules[0].priority', 50),
self.check('customRules[1].priority', 100),
self.check('managedRules.managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRules.managedRuleSets[0].ruleSetVersion', '3.0'),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 2),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '921100'),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[1].ruleGroupName', self.kwargs['csr_grp2']),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[1].rules[0].ruleId', '913100'),
self.check('managedRules.managedRuleSets[1].ruleSetType', 'Microsoft_BotManagerRuleSet'),
self.check('managedRules.managedRuleSets[1].ruleSetVersion', '0.1'),
self.check('policySettings.fileUploadLimitInMb', 64),
self.check('policySettings.maxRequestBodySizeInKb', 128),
self.check('policySettings.mode', 'Prevention'),
self.check('policySettings.requestBodyCheck', False),
self.check('policySettings.state', 'Enabled')
])
# prepare two IPs
self.cmd('network public-ip create -g {rg} -n {ip1} --sku standard')
self.cmd('network public-ip create -g {rg} -n {ip2} --sku standard')
        # create two application gateways, both associated with the same waf-policy
self.cmd('network application-gateway create -g {rg} -n {ag1} '
'--subnet subnet1 --vnet-name vnet1 --public-ip-address {ip1} --sku WAF_v2 --waf-policy {waf}')
self.cmd('network application-gateway create -g {rg} -n {ag2} '
'--subnet subnet2 --vnet-name vnet2 --public-ip-address {ip2} --sku WAF_v2 --waf-policy {waf}')
self.cmd('network application-gateway show -g {rg} -n {ag1}',
checks=self.check("firewallPolicy.contains(id, '{waf}')", True))
self.cmd('network application-gateway show -g {rg} -n {ag2}',
checks=self.check("firewallPolicy.contains(id, '{waf}')", True))
        # verify the waf-policy reports both assigned application gateways
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}', checks=[
self.check('applicationGateways | length(@)', 2),
self.check("contains(applicationGateways[0].id, '{ag1}')", True),
self.check("contains(applicationGateways[1].id, '{ag2}')", True)
])

    @ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_custom_rule_')
def test_network_app_gateway_waf_custom_rule(self, resource_group):
self.kwargs.update({
'waf': 'agp1',
'rule': 'rule1',
'ip': 'pip1',
'ag': 'ag1',
'rg': resource_group
})
        # create a waf-policy with an empty custom-rule list
self.cmd('network application-gateway waf-policy create -g {rg} -n {waf}')
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}', checks=[
self.check('customRules | length(@)', 0)
])
# create a custom rule
self.cmd('network application-gateway waf-policy custom-rule create -g {rg} '
'--policy-name {waf} -n {rule} '
'--priority 50 --action LOG --rule-type MatchRule',
checks=[
self.check('priority', 50),
self.check('ruleType', 'MatchRule'),
self.check('action', 'Log'),
self.check('matchConditions | length(@)', 0)
])
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}', checks=[
self.check('customRules | length(@)', 1)
])
self.cmd('network application-gateway waf-policy custom-rule show -g {rg} '
'--policy-name {waf} -n {rule}',
checks=[
self.check('priority', 50),
self.check('ruleType', 'MatchRule'),
self.check('action', 'Log'),
self.check('matchConditions | length(@)', 0)
])
        # add a match condition to the previously created custom rule
self.cmd('network application-gateway waf-policy custom-rule match-condition add -g {rg} '
'--policy-name {waf} -n {rule} '
'--match-variables RequestHeaders.value --operator contains --values foo boo --transform lowercase')
self.cmd('network application-gateway waf-policy custom-rule show -g {rg} '
'--policy-name {waf} -n {rule}',
checks=[
self.check('priority', 50),
self.check('ruleType', 'MatchRule'),
self.check('action', 'Log'),
self.check('matchConditions | length(@)', 1)
])
        # update one of the properties
self.cmd('network application-gateway waf-policy custom-rule update -g {rg} '
'--policy-name {waf} -n {rule} '
'--priority 75',
checks=self.check('priority', 75))
# add another match condition to the same custom rule
self.cmd('network application-gateway waf-policy custom-rule match-condition add -g {rg} '
'--policy-name {waf} -n {rule} '
'--match-variables RequestHeaders.value --operator contains '
'--values remove this --transform lowercase')
self.cmd('network application-gateway waf-policy custom-rule show -g {rg} '
'--policy-name {waf} -n {rule}',
checks=[
self.check('priority', 75),
self.check('ruleType', 'MatchRule'),
self.check('action', 'Log'),
self.check('matchConditions | length(@)', 2)
])
        # remove one of the match conditions from the custom rule
self.cmd('network application-gateway waf-policy custom-rule match-condition remove -g {rg} '
'--policy-name {waf} -n {rule} '
'--index 0')
self.cmd('network application-gateway waf-policy custom-rule show -g {rg} '
'--policy-name {waf} -n {rule}',
checks=[
self.check('priority', 75),
self.check('ruleType', 'MatchRule'),
self.check('action', 'Log'),
self.check('matchConditions | length(@)', 1)
])
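
    # Sketch of the match condition above: with the 'lowercase' transform the
    # 'contains' operator is effectively case-insensitive on the header value
    # (illustrative approximation, not the WAF engine's logic).
    @staticmethod
    def _sketch_match_contains_lowercase(values, header_value):
        lowered = header_value.lower()
        return any(needle in lowered for needle in values)
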
@ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_policy_setting_')
def test_network_app_gateway_waf_policy_setting(self, resource_group):
self.kwargs.update({
'waf': 'agp1',
'ag': 'ag1',
'rg': resource_group,
})
# check default policy setting values
self.cmd('network application-gateway waf-policy create -g {rg} -n {waf}', checks=[
self.check('policySettings.fileUploadLimitInMb', 100),
self.check('policySettings.maxRequestBodySizeInKb', 128),
self.check('policySettings.mode', 'Detection'),
self.check('policySettings.requestBodyCheck', True),
self.check('policySettings.state', 'Disabled')
])
        # update some of the policy settings and verify the others keep their defaults
self.cmd('network application-gateway waf-policy policy-setting update -g {rg} --policy-name {waf} '
'--state Enabled --file-upload-limit-in-mb 64 --mode Prevention',
checks=[
self.check('policySettings.fileUploadLimitInMb', 64),
self.check('policySettings.maxRequestBodySizeInKb', 128),
self.check('policySettings.mode', 'Prevention'),
self.check('policySettings.requestBodyCheck', False),
self.check('policySettings.state', 'Enabled')
])

    @ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_policy_managed_rules_')
def test_network_app_gateway_waf_policy_managed_rules(self, resource_group):
self.kwargs.update({
'waf': 'agp1',
'ip': 'pip1',
'ag': 'ag1',
'rg': resource_group,
'csr_grp1': 'REQUEST-921-PROTOCOL-ATTACK',
'csr_grp2': 'REQUEST-913-SCANNER-DETECTION'
})
self.cmd('network application-gateway waf-policy create -g {rg} -n {waf}')
        # case 1: initialize (add) a managed rule set
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0 '
'--group-name {csr_grp1} --rules 921100 921110')
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}', checks=[
self.check('managedRules.managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRules.managedRuleSets[0].ruleSetVersion', '3.0'),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 2),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '921100')
])
        # case 2: append (add) another rule to the same rule group
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0 '
'--group-name {csr_grp1} --rules 921150')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.0'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 3),
self.check('managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[2].ruleId', '921150')
])
        # case 3: add an override for a different rule group
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0 '
'--group-name {csr_grp2} --rules 913100')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.0'),
self.check('managedRuleSets[0].ruleGroupOverrides[1].rules | length(@)', 1),
self.check('managedRuleSets[0].ruleGroupOverrides[1].ruleGroupName', self.kwargs['csr_grp2']),
self.check('managedRuleSets[0].ruleGroupOverrides[1].rules[0].ruleId', '913100')
])
        # case 4: override (update) the existing managed rule set
self.cmd('network application-gateway waf-policy managed-rule rule-set update -g {rg} --policy-name {waf} '
'--type OWASP --version 3.0 '
'--group-name {csr_grp1} --rules 921100 921150')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.0'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 2),
self.check('managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '921100'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[1].ruleId', '921150'),
])
        # case 5: clear the managed rule-set overrides for group {csr_grp1}, leaving only {csr_grp2}
self.cmd('network application-gateway waf-policy managed-rule rule-set remove -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name {csr_grp1} ')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}', checks=[
self.check('managedRuleSets[0].ruleGroupOverrides | length(@)', 1),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 1),
self.check('managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp2']),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '913100'),
])
        # case 6: move the OWASP managed rule set from version 3.0 to 3.1, clearing all overrides
self.cmd('network application-gateway waf-policy managed-rule rule-set update -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRuleSets[0].ruleGroupOverrides | length(@)', 0)
])
# case 7: override existing rules with one rule for OWASP 3.1
self.cmd('network application-gateway waf-policy managed-rule rule-set update -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name REQUEST-911-METHOD-ENFORCEMENT '
'--rules 911100')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}', checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRuleSets[0].ruleGroupOverrides | length(@)', 1),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 1),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '911100')
])

    @ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_policy_managed_rules_')
def test_network_app_gateway_waf_policy_with_version_and_type(self, resource_group):
self.kwargs.update({
'waf': 'agp1',
'ip': 'pip1',
'ag': 'ag1',
'rg': resource_group,
'csr_grp1': 'REQUEST-921-PROTOCOL-ATTACK',
'csr_grp2': 'REQUEST-913-SCANNER-DETECTION'
})
self.cmd('network application-gateway waf-policy create -g {rg} -n {waf} --version 3.1 --type OWASP')
        # case 1: initialize (add) a managed rule set
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name {csr_grp1} --rules 921120 921110')
self.cmd('network application-gateway waf-policy show -g {rg} -n {waf}', checks=[
self.check('managedRules.managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRules.managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 2),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRules.managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '921120')
])
        # case 2: append (add) another rule to the same rule group
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name {csr_grp1} --rules 921150')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 3),
self.check('managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[2].ruleId', '921150')
])
        # case 3: add an override for a different rule group
self.cmd('network application-gateway waf-policy managed-rule rule-set add -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name {csr_grp2} --rules 913100')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRuleSets[0].ruleGroupOverrides[1].rules | length(@)', 1),
self.check('managedRuleSets[0].ruleGroupOverrides[1].ruleGroupName', self.kwargs['csr_grp2']),
self.check('managedRuleSets[0].ruleGroupOverrides[1].rules[0].ruleId', '913100')
])
        # case 4: override (update) the existing managed rule set
self.cmd('network application-gateway waf-policy managed-rule rule-set update -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name {csr_grp1} --rules 921130 921140')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules | length(@)', 2),
self.check('managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp1']),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[0].ruleId', '921130'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].rules[1].ruleId', '921140')
])
        # case 5: clear the managed rule-set overrides for group {csr_grp1}
self.cmd('network application-gateway waf-policy managed-rule rule-set remove -g {rg} --policy-name {waf} '
'--type OWASP --version 3.1 '
'--group-name {csr_grp1} ')
self.cmd('network application-gateway waf-policy managed-rule rule-set list -g {rg} --policy-name {waf}',
checks=[
self.check('managedRuleSets[0].ruleSetType', 'OWASP'),
self.check('managedRuleSets[0].ruleSetVersion', '3.1'),
self.check('managedRuleSets[0].ruleGroupOverrides[0].ruleGroupName', self.kwargs['csr_grp2'])
])

    @ResourceGroupPreparer(name_prefix='cli_test_app_gateway_waf_policy_managed_rules_exclusion')
def test_network_app_gateway_waf_policy_managed_rules_exclusions(self, resource_group):
self.kwargs.update({
'waf': 'agp1',
'ip': 'pip1',
'ag': 'ag1',
'rg': resource_group
})
self.cmd('network application-gateway waf-policy create -g {rg} -n {waf}')
# add one exclusion rule
self.cmd('network application-gateway waf-policy managed-rule exclusion add -g {rg} --policy-name {waf} '
'--match-variable "RequestHeaderNames" --selector-match-operator "StartsWith" --selector "Bing"')
self.cmd('network application-gateway waf-policy managed-rule exclusion list -g {rg} --policy-name {waf}',
checks=[
self.check('exclusions | length(@)', 1)
])
# add another exclusion rule
self.cmd('network application-gateway waf-policy managed-rule exclusion add -g {rg} --policy-name {waf} '
'--match-variable "RequestHeaderNames" --selector-match-operator "Contains" --selector "Azure"')
self.cmd('network application-gateway waf-policy managed-rule exclusion list -g {rg} --policy-name {waf}',
checks=[
self.check('exclusions | length(@)', 2)
])
# clear all exclusion rules
self.cmd('network application-gateway waf-policy managed-rule exclusion remove -g {rg} --policy-name {waf} ')
self.cmd('network application-gateway waf-policy managed-rule exclusion list -g {rg} --policy-name {waf}',
checks=[
self.check('exclusions | length(@)', 0)
])
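# LiveScenarioTest runs only against live Azure and records nothing, so the DDoS plan lifecycle below is exercised end to end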
class NetworkDdosProtectionScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_ddos_protection')
def test_network_ddos_protection_plan(self, resource_group):
self.kwargs.update({
'vnet1': 'vnet1',
'vnet2': 'vnet2',
'ddos': 'ddos1'
})
self.cmd('network vnet create -g {rg} -n {vnet1}')
self.kwargs['vnet2_id'] = self.cmd('network vnet create -g {rg} -n {vnet2}').get_output_in_json()['newVNet']['id']
# can be attached through DDoS create
self.kwargs['ddos_id'] = self.cmd('network ddos-protection create -g {rg} -n {ddos} --vnets {vnet1} {vnet2_id} --tags foo=doo').get_output_in_json()['id']
self.cmd('network ddos-protection show -g {rg} -n {ddos}')
# can be detached through VNet update
self.cmd('network vnet update -g {rg} -n {vnet1} --ddos-protection-plan ""')
self.cmd('network vnet update -g {rg} -n {vnet2} --ddos-protection-plan ""')
self.cmd('network ddos-protection show -g {rg} -n {ddos}')
# can be attached through VNet update
self.cmd('network vnet update -g {rg} -n {vnet1} --ddos-protection-plan {ddos}')
self.cmd('network vnet update -g {rg} -n {vnet2} --ddos-protection-plan {ddos_id}')
self.cmd('network ddos-protection show -g {rg} -n {ddos}')
# can be detached through DDoS update
self.cmd('network ddos-protection update -g {rg} -n {ddos} --tags doo=foo --vnets ""')
self.cmd('network ddos-protection show -g {rg} -n {ddos}')
# can be attached through DDoS update
self.cmd('network ddos-protection update -g {rg} -n {ddos} --vnets {vnet2_id} --tags foo=boo')
self.cmd('network ddos-protection show -g {rg} -n {ddos}')
self.cmd('network ddos-protection list -g {rg}')
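# deleting the plan should fail while VNets are still attached to it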
with self.assertRaises(Exception):
self.cmd('network ddos-protection delete -g {rg} -n {ddos}')
# remove all vnets and retry
self.cmd('network ddos-protection update -g {rg} -n {ddos} --vnets ""')
self.cmd('network ddos-protection delete -g {rg} -n {ddos}')
class NetworkPublicIpScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_public_ip')
def test_network_public_ip(self, resource_group):
self.kwargs.update({
'ip1': 'pubipdns',
'ip2': 'pubipnodns',
'dns': 'woot'
})
self.cmd('network public-ip create -g {rg} -n {ip1} --dns-name {dns} --allocation-method static', checks=[
self.check('publicIp.provisioningState', 'Succeeded'),
self.check('publicIp.publicIpAllocationMethod', 'Static'),
self.check('publicIp.dnsSettings.domainNameLabel', '{dns}')
])
self.cmd('network public-ip create -g {rg} -n {ip2}', checks=[
self.check('publicIp.provisioningState', 'Succeeded'),
self.check('publicIp.publicIpAllocationMethod', 'Dynamic'),
self.check('publicIp.dnsSettings', None)
])
self.cmd('network public-ip update -g {rg} -n {ip2} --allocation-method static --dns-name wowza2 --idle-timeout 10 --tags foo=doo', checks=[
self.check('publicIpAllocationMethod', 'Static'),
self.check('dnsSettings.domainNameLabel', 'wowza2'),
self.check('idleTimeoutInMinutes', 10),
self.check('tags.foo', 'doo')
])
self.cmd('network public-ip list -g {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network public-ip show -g {rg} -n {ip1}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{ip1}'),
self.check('resourceGroup', '{rg}')
])
self.cmd('network public-ip delete -g {rg} -n {ip1}')
self.cmd('network public-ip list -g {rg}',
checks=self.check("length[?name == '{ip1}']", None))
@ResourceGroupPreparer(name_prefix='cli_test_public_ip_zone', location='eastus2')
def test_network_public_ip_zone(self, resource_group):
self.cmd('network public-ip create -g {rg} -n ip --sku Standard -z 1 2 3', checks=[
self.check('length(publicIp.zones)', 3)
])
class NetworkZonedPublicIpScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_zoned_public_ip')
def test_network_zoned_public_ip(self, resource_group):
self.kwargs['ip'] = 'pubip'
self.cmd('network public-ip create -g {rg} -n {ip} -l centralus -z 2',
checks=self.check('publicIp.zones[0]', '2'))
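# splitlines()[2] is the data row; the column set (Name, ResourceGroup, Location, Zones, AddressVersion, AllocationMethod, IdleTimeoutInMinutes, ProvisioningState) is assumed from the expected values below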
table_output = self.cmd('network public-ip show -g {rg} -n {ip} -otable').output
self.assertEqual(table_output.splitlines()[2].split(), ['pubip', resource_group, 'centralus', '2', 'IPv4', 'Dynamic', '4', 'Succeeded'])
class NetworkRouteFilterScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_network_route_filter')
def test_network_route_filter(self, resource_group):
self.kwargs['filter'] = 'filter1'
self.kwargs['rg'] = resource_group
self.cmd('network route-filter create -g {rg} -n {filter} --tags foo=doo')
self.cmd('network route-filter update -g {rg} -n {filter}')
self.cmd('network route-filter show -g {rg} -n {filter}')
self.cmd('network route-filter list -g {rg}')
# temporarily disable this test
# self.cmd('network route-filter rule list-service-communities')
self.cmd('network route-filter rule create -g {rg} --filter-name {filter} -n rule1 --communities 12076:5040 12076:5030 --access allow')
self.cmd('network route-filter rule update -g {rg} --filter-name {filter} -n rule1 --set access=Deny')
self.cmd('network route-filter rule show -g {rg} --filter-name {filter} -n rule1')
self.cmd('network route-filter rule list -g {rg} --filter-name {filter}')
self.cmd('network route-filter rule delete -g {rg} --filter-name {filter} -n rule1')
self.cmd('network route-filter delete -g {rg} -n {filter}')
class NetworkExpressRouteScenarioTest(ScenarioTest):
def _test_express_route_peering(self):
def _create_peering(peering, peer_asn, vlan, primary_prefix, secondary_prefix):
self.kwargs.update({
'peering': peering,
'asn': peer_asn,
'vlan': vlan,
'pri_prefix': primary_prefix,
'sec_prefix': secondary_prefix
})
self.cmd('network express-route peering create -g {rg} --circuit-name {er} --peering-type {peering} --peer-asn {asn} --vlan-id {vlan} --primary-peer-subnet {pri_prefix} --secondary-peer-subnet {sec_prefix}')
# create a private peering
_create_peering('AzurePrivatePeering', 10001, 101, '102.0.0.0/30', '103.0.0.0/30')
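# create a Microsoft peering; a Premium SKU circuit and advertised public prefixes are required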
self.cmd('network express-route peering create -g {rg} --circuit-name {er} --peering-type MicrosoftPeering --peer-asn 10002 --vlan-id 103 --primary-peer-subnet 104.0.0.0/30 --secondary-peer-subnet 105.0.0.0/30 --advertised-public-prefixes 104.0.0.0/30 --customer-asn 10000 --routing-registry-name level3')
self.cmd('network express-route peering show -g {rg} --circuit-name {er} -n MicrosoftPeering', checks=[
self.check('microsoftPeeringConfig.advertisedPublicPrefixes[0]', '104.0.0.0/30'),
self.check('microsoftPeeringConfig.customerAsn', 10000),
self.check('microsoftPeeringConfig.routingRegistryName', 'LEVEL3')
])
self.cmd('network express-route peering delete -g {rg} --circuit-name {er} -n MicrosoftPeering')
self.cmd('network express-route peering list --resource-group {rg} --circuit-name {er}',
checks=self.check('length(@)', 1))
self.cmd('network express-route peering update -g {rg} --circuit-name {er} -n AzurePrivatePeering --set vlanId=200',
checks=self.check('vlanId', 200))
def _test_express_route_auth(self):
self.cmd('network express-route auth create -g {rg} --circuit-name {er} -n auth1',
checks=self.check('authorizationUseStatus', 'Available'))
self.cmd('network express-route auth list --resource-group {rg} --circuit-name {er}',
checks=self.check('length(@)', 1))
self.cmd('network express-route auth show -g {rg} --circuit-name {er} -n auth1',
checks=self.check('authorizationUseStatus', 'Available'))
self.cmd('network express-route auth delete -g {rg} --circuit-name {er} -n auth1')
self.cmd('network express-route auth list --resource-group {rg} --circuit-name {er}', checks=self.is_empty())
@ResourceGroupPreparer(name_prefix='cli_test_express_route')
def test_network_express_route(self, resource_group):
self.kwargs = {
'rg': resource_group,
'er': 'circuit1',
'rt': 'Microsoft.Network/expressRouteCircuits'
}
self.cmd('network express-route list-service-providers', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == 'Microsoft.Network/expressRouteServiceProviders']) == length(@)", True)
])
# Premium SKU required to create MicrosoftPeering settings
self.cmd('network express-route create -g {rg} -n {er} --bandwidth 50 --provider "Ibiza Test Provider" --peering-location Area51 --sku-tier Premium --tags foo=doo')
self.cmd('network express-route list', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True)
])
self.cmd('network express-route list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network express-route show --resource-group {rg} --name {er}', checks=[
self.check('type(@)', 'object'),
self.check('type', '{rt}'),
self.check('name', '{er}'),
self.check('resourceGroup', '{rg}'),
self.check('tags.foo', 'doo')
])
self.cmd('network express-route get-stats --resource-group {rg} --name {er}',
checks=self.check('type(@)', 'object'))
self.cmd('network express-route update -g {rg} -n {er} --set tags.test=Test', checks=[
self.check('tags.test', 'Test')
])
self.cmd('network express-route update -g {rg} -n {er} --tags foo=boo',
checks=self.check('tags.foo', 'boo'))
self._test_express_route_auth()
self._test_express_route_peering()
# because the circuit isn't actually provisioned, these commands will not return anything useful
# so we will just verify that the command makes it through the SDK without error.
self.cmd('network express-route list-arp-tables --resource-group {rg} --name {er} --peering-name azureprivatepeering --path primary')
self.cmd('network express-route list-route-tables --resource-group {rg} --name {er} --peering-name azureprivatepeering --path primary')
self.cmd('network express-route delete --resource-group {rg} --name {er}')
# Expecting no results as we just deleted the only express route in the resource group
self.cmd('network express-route list --resource-group {rg}', checks=self.is_empty())
with self.assertRaisesRegexp(CLIError, 'Please provide a complete resource ID'):
self.cmd('network express-route gateway connection show --ids /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myrg/providers/Microsoft.Network/expressRouteGateways/aaa')
@record_only()
@ResourceGroupPreparer(name_prefix='cli_test_express_route')
def test_network_express_route_connection_routing_configuration(self, resource_group):
self.kwargs = {
'rg': 'dedharrtv3final',
'gw': '16297a6ff5314c0f8d0eb580aa7861b3-eastus-er-gw',
'connection': 'yuerconnection',
'peering': '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dedharrtv3final/providers/Microsoft.Network/expressRouteCircuits/clicktfinal/peerings/AzurePrivatePeering',
'route_table1': '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dedharrtv3final/providers/Microsoft.Network/virtualHubs/blhub/hubRouteTables/routetable1',
'route_table2': '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dedharrtv3final/providers/Microsoft.Network/virtualHubs/blhub/hubRouteTables/routetable2'
}
self.cmd('network express-route gateway connection update '
'-n {connection} '
'-g {rg} '
'--gateway-name {gw} '
'--peering {peering} '
'--associated-route-table {route_table1} '
'--propagated-route-tables {route_table1} {route_table2} '
'--labels label1 label2',
checks=[
self.check('provisioningState', 'Succeeded'),
self.check('name', self.kwargs['connection']),
self.check('routingConfiguration.associatedRouteTable.id', self.kwargs['route_table1']),
self.check('length(routingConfiguration.propagatedRouteTables.ids)', 2),
self.check('routingConfiguration.propagatedRouteTables.ids[0].id', self.kwargs['route_table1']),
self.check('routingConfiguration.propagatedRouteTables.ids[1].id', self.kwargs['route_table2']),
self.check('length(routingConfiguration.propagatedRouteTables.labels)', 2),
self.check('routingConfiguration.propagatedRouteTables.labels[0]', 'label1'),
self.check('routingConfiguration.propagatedRouteTables.labels[1]', 'label2')])
self.cmd('network express-route gateway connection show -n {connection} -g {rg} --gateway-name {gw}', checks=[
self.check('provisioningState', 'Succeeded'),
self.check('name', self.kwargs['connection']),
self.check('routingConfiguration.associatedRouteTable.id', self.kwargs['route_table1']),
self.check('length(routingConfiguration.propagatedRouteTables.ids)', 2),
self.check('routingConfiguration.propagatedRouteTables.ids[0].id', self.kwargs['route_table1']),
self.check('routingConfiguration.propagatedRouteTables.ids[1].id', self.kwargs['route_table2']),
self.check('length(routingConfiguration.propagatedRouteTables.labels)', 2),
self.check('routingConfiguration.propagatedRouteTables.labels[0]', 'label1'),
self.check('routingConfiguration.propagatedRouteTables.labels[1]', 'label2')
])
class NetworkExpressRoutePortScenarioTest(ScenarioTest):
def __init__(self, method_name):
super().__init__(method_name, recording_processors=[
ExpressRoutePortLOAContentReplacer()
])
def test_network_express_route_port_identity(self):
"""
Since the ExpressRoute Port resource is currently rare, writing an automated test is very expensive.
We run the test manually for now. For any changes related to this command, please contact the service team for help.
For usage, run `az network express-route port identity --help`.
"""
pass
def test_network_express_route_port_config_macsec(self):
"""
Since the ExpressRoute Port resource is currently rare, writing an automated test is very expensive.
We run the test manually for now. For any changes related to this command, please contact the service team for help.
For usage, run `az network express-route port link update --help`.
"""
pass
def test_network_express_route_port_config_adminstate(self):
"""
Since the ExpressRoute Port resource is currently rare, writing an automated test is very expensive.
We run the test manually for now. For any changes related to this command, please contact the service team for help.
For usage, run `az network express-route port link update --help`.
"""
pass
@record_only()
@AllowLargeResponse()
def test_network_express_route_port_generate_loa(self):
"""
The ExpressRoute Port comes from the service team and is located in a different subscription, and it will be revoked once this feature ships.
Therefore, this test is record only.
"""
self.kwargs.update({
'rg': 'ER-AutoTriage-RG',
'er_port': 'ER-autotriage-erdirect',
})
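# generate-loa downloads the Letter of Authorization for the port into the file given by -f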
self.cmd('network express-route port generate-loa --customer-name MyCustomer -g {rg} --name {er_port} -f loa1')
class NetworkExpressRouteIPv6PeeringScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_express_route_ipv6_peering')
def test_network_express_route_ipv6_peering(self, resource_group):
self.kwargs['er'] = 'circuit1'
# Premium SKU required to create MicrosoftPeering settings
self.cmd('network express-route create -g {rg} -n {er} --bandwidth 50 --provider "Ibiza Test Provider" --peering-location Area51 --sku-tier Premium')
self.cmd('network express-route peering create -g {rg} --circuit-name {er} --peering-type MicrosoftPeering --peer-asn 10002 --vlan-id 103 --primary-peer-subnet 104.0.0.0/30 --secondary-peer-subnet 105.0.0.0/30 --advertised-public-prefixes 104.0.0.0/30 --customer-asn 10000 --routing-registry-name level3')
self.cmd('network express-route peering update -g {rg} --circuit-name {er} -n MicrosoftPeering --ip-version ipv6 --primary-peer-subnet 2001:db00::/126 --secondary-peer-subnet 2002:db00::/126 --advertised-public-prefixes 2001:db00::/126 --customer-asn 100001 --routing-registry-name level3')
self.cmd('network express-route peering show -g {rg} --circuit-name {er} -n MicrosoftPeering', checks=[
self.check('microsoftPeeringConfig.advertisedPublicPrefixes[0]', '104.0.0.0/30'),
self.check('microsoftPeeringConfig.customerAsn', 10000),
self.check('microsoftPeeringConfig.routingRegistryName', 'LEVEL3'),
self.check('ipv6PeeringConfig.microsoftPeeringConfig.advertisedPublicPrefixes[0]', '2001:db00::/126'),
self.check('ipv6PeeringConfig.microsoftPeeringConfig.customerAsn', 100001),
self.check('ipv6PeeringConfig.state', 'Enabled')
])
@ResourceGroupPreparer(name_prefix='cli_test_express_route_ipv6_peering2', location='eastus')
def test_network_express_route_ipv6_peering2(self, resource_group):
self.kwargs['er'] = 'test_circuit'
# create with ipv6
self.cmd('network express-route create -g {rg} -n {er} --bandwidth 50 --provider "Ibiza Test Provider" '
'--peering-location Area51 --sku-tier Premium')
self.cmd('network express-route peering create -g {rg} --circuit-name {er} --peering-type AzurePrivatePeering '
'--peer-asn 10002 --vlan-id 103 --ip-version ipv6 --primary-peer-subnet 2002:db00::/126 '
'--secondary-peer-subnet 2003:db00::/126',
checks=[self.check('ipv6PeeringConfig.primaryPeerAddressPrefix', '2002:db00::/126'),
self.check('ipv6PeeringConfig.secondaryPeerAddressPrefix', '2003:db00::/126')])
class NetworkExpressRouteGlobalReachScenarioTest(ScenarioTest):
@record_only() # record_only as the express route is extremely expensive, contact service team for an available ER
@ResourceGroupPreparer(name_prefix='cli_test_express_route_global_reach')
def test_network_express_route_global_reach(self, resource_group):
from azure.core.exceptions import HttpResponseError
self.kwargs.update({
'er1': 'er1',
'er2': 'er2',
'conn12': 'conn12',
})
self.cmd('network express-route create -g {rg} -n {er1} --allow-global-reach --bandwidth 50 --peering-location Area51 --provider "Microsoft ER Test" --sku-tier Premium')
self.cmd('network express-route peering create -g {rg} --circuit-name {er1} --peering-type AzurePrivatePeering --peer-asn 10001 --vlan-id 101 --primary-peer-subnet 102.0.0.0/30 --secondary-peer-subnet 103.0.0.0/30')
self.cmd('network express-route create -g {rg} -n {er2} --allow-global-reach --bandwidth 50 --peering-location "Denver Test" --provider "Test Provider NW" --sku-tier Premium')
self.cmd('network express-route peering create -g {rg} --circuit-name {er2} --peering-type AzurePrivatePeering --peer-asn 10002 --vlan-id 102 --primary-peer-subnet 104.0.0.0/30 --secondary-peer-subnet 105.0.0.0/30')
# These commands won't succeed because circuit creation requires a manual step from the service.
with self.assertRaisesRegexp(HttpResponseError, 'is Not Provisioned'):
self.cmd('network express-route peering connection create -g {rg} --circuit-name {er1} --peering-name AzurePrivatePeering -n {conn12} --peer-circuit {er2} --address-prefix 104.0.0.0/29')
self.cmd('network express-route peering connection show -g {rg} --circuit-name {er1} --peering-name AzurePrivatePeering -n {conn12}')
self.cmd('network express-route peering connection delete -g {rg} --circuit-name {er1} --peering-name AzurePrivatePeering -n {conn12}')
@record_only() # record_only as the express route is extremely expensive, contact service team for an available ER
@ResourceGroupPreparer(name_prefix='cli_test_express_route_peer_connection')
def test_network_express_route_peer_connection(self, resource_group):
from msrestazure.azure_exceptions import CloudError
self.kwargs.update({
'er1': 'er1',
'er2': 'er2',
'peconn12': 'peconn12',
})
self.cmd('network express-route create -g {rg} -n {er1} --allow-global-reach --bandwidth 50 --peering-location Area51 --provider "Microsoft ER Test" --sku-tier Premium')
self.cmd('network express-route peering create -g {rg} --circuit-name {er1} --peering-type AzurePrivatePeering --peer-asn 10001 --vlan-id 101 --primary-peer-subnet 102.0.0.0/30 --secondary-peer-subnet 103.0.0.0/30')
self.cmd('network express-route create -g {rg} -n {er2} --allow-global-reach --bandwidth 50 --peering-location "Denver Test" --provider "Test Provider NW" --sku-tier Premium')
self.cmd('network express-route peering create -g {rg} --circuit-name {er2} --peering-type AzurePrivatePeering --peer-asn 10002 --vlan-id 102 --primary-peer-subnet 104.0.0.0/30 --secondary-peer-subnet 105.0.0.0/30')
# the connection cannot be created, so this test fails because the resource is not found
with self.assertRaisesRegexp(SystemExit, '3'):
self.cmd('network express-route peering peer-connection show -g {rg} --circuit-name {er1} --peering-name AzurePrivatePeering -n {peconn12}')
self.cmd('network express-route peering peer-connection list -g {rg} --circuit-name {er1} --peering-name AzurePrivatePeering')
class NetworkCrossRegionLoadBalancerScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_cr_load_balancer')
def test_network_cross_region_lb(self, resource_group):
self.kwargs.update({
'lb': 'cross-region-lb',
'rg': resource_group,
'rt': 'Microsoft.Network/loadBalancers',
'pub_ip': 'publicip4'
})
# test lb create with min params (new ip)
self.cmd('network cross-region-lb create -n {lb}1 -g {rg}', checks=[
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Dynamic'),
self.check('loadBalancer.frontendIPConfigurations[0].resourceGroup', '{rg}')
])
# test internet facing load balancer with new static public IP
self.cmd('network cross-region-lb create -n {lb}2 -g {rg} --public-ip-address-allocation static --tags foo=doo')
self.cmd('network public-ip show -g {rg} -n PublicIP{lb}2', checks=[
self.check('publicIpAllocationMethod', 'Static'),
self.check('tags.foo', 'doo')
])
# test internet facing load balancer with existing public IP (by name)
self.cmd('network public-ip create -n {pub_ip} -g {rg} --sku Standard --tier Global')
self.cmd('network cross-region-lb create -n {lb}3 -g {rg} --public-ip-address {pub_ip}', checks=[
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Dynamic'),
self.check('loadBalancer.frontendIPConfigurations[0].resourceGroup', '{rg}'),
self.check("loadBalancer.frontendIPConfigurations[0].properties.publicIPAddress.contains(id, '{pub_ip}')", True)
])
self.cmd('network cross-region-lb list', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True)
])
self.cmd('network cross-region-lb list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network cross-region-lb show --resource-group {rg} --name {lb}1', checks=[
self.check('type(@)', 'object'),
self.check('type', '{rt}'),
self.check('resourceGroup', '{rg}'),
self.check('name', '{lb}1')
])
self.cmd('network cross-region-lb delete --resource-group {rg} --name {lb}1')
# Expecting 2 results as we just deleted one of the three cross-region lbs in the resource group
self.cmd('network cross-region-lb list --resource-group {rg}', checks=self.check('length(@)', 2))
@ResourceGroupPreparer(name_prefix='cli_test_cross_region_load_balancer_ip_config')
def test_network_cross_region_load_balancer_ip_config(self, resource_group):
for i in range(1, 4): # create 3 public IPs to use for the test
self.cmd('network public-ip create -g {{rg}} -n publicip{} --sku Standard'.format(i))
# create internet-facing LB with public IP (lb1)
self.cmd('network cross-region-lb create -g {rg} -n lb1 --public-ip-address publicip1')
# Test frontend IP configuration for internet-facing LB
self.cmd('network cross-region-lb frontend-ip create -g {rg} --lb-name lb1 -n ipconfig1 --public-ip-address publicip2')
self.cmd('network cross-region-lb frontend-ip list -g {rg} --lb-name lb1',
checks=self.check('length(@)', 2))
self.cmd('network cross-region-lb frontend-ip update -g {rg} --lb-name lb1 -n ipconfig1 --public-ip-address publicip3')
self.cmd('network cross-region-lb frontend-ip show -g {rg} --lb-name lb1 -n ipconfig1',
checks=self.check("publicIpAddress.contains(id, 'publicip3')", True))
# test generic update
self.kwargs['ip2_id'] = resource_id(subscription=self.get_subscription_id(), resource_group=self.kwargs['rg'], namespace='Microsoft.Network', type='publicIPAddresses', name='publicip2')
self.cmd('network cross-region-lb frontend-ip update -g {rg} --lb-name lb1 -n ipconfig1 --set publicIpAddress.id="{ip2_id}"',
checks=self.check("publicIpAddress.contains(id, 'publicip2')", True))
self.cmd('network cross-region-lb frontend-ip delete -g {rg} --lb-name lb1 -n ipconfig1')
self.cmd('network cross-region-lb frontend-ip list -g {rg} --lb-name lb1',
checks=self.check('length(@)', 1))
@ResourceGroupPreparer(name_prefix='cli_test_cross_region_lb_address_pool', location='eastus2')
def test_network_cross_region_lb_address_pool(self, resource_group):
self.kwargs.update({
'lb': self.create_random_name('cross_region_lb', 24),
'rg': resource_group
})
self.cmd('network cross-region-lb create -g {rg} -n {lb}')
for i in range(1, 4):
self.cmd('network cross-region-lb address-pool create -g {{rg}} --lb-name {{lb}} -n bap{}'.format(i),
checks=self.check('name', 'bap{}'.format(i)))
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 4))
self.cmd('network lb address-pool show -g {rg} --lb-name {lb} -n bap1',
checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool delete -g {rg} --lb-name {lb} -n bap1',
checks=self.is_empty())
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 3))
@ResourceGroupPreparer(name_prefix='cli_test_cross_region_lb_address_pool_addresses', location='eastus2')
def test_network_cross_region_lb_address_pool_addresses(self, resource_group):
self.kwargs.update({
'regional_lb1': self.create_random_name('regional_lb', 24),
'regional_lb2': self.create_random_name('regional_lb', 24),
'lb': self.create_random_name('cross_region_lb', 24),
'address_pool': self.create_random_name('address_pool', 24),
'backend_address1': self.create_random_name('backend_address', 24),
'backend_address2': self.create_random_name('backend_address', 24),
'rg': resource_group,
'lb_address_pool_file_path': os.path.join(TEST_DIR, 'test-cross-region-lb-address-pool-config.json')
})
regional_lb_frontend_ip_address1 = self.cmd('network lb create -n {regional_lb1} -g {rg} --sku Standard').get_output_in_json()['loadBalancer']['frontendIPConfigurations'][0]['id']
regional_lb_frontend_ip_address2 = self.cmd('network lb create -n {regional_lb2} -g {rg} --sku Standard').get_output_in_json()['loadBalancer']['frontendIPConfigurations'][0]['id']
self.kwargs.update({
'regional_lb_frontend_ip_address1': regional_lb_frontend_ip_address1,
'regional_lb_frontend_ip_address2': regional_lb_frontend_ip_address2
})
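# a cross-region (global tier) LB fronts regional standard LBs: its backend addresses reference their frontend IP configurations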
self.cmd('network cross-region-lb create -g {rg} -n {lb}')
self.cmd('network cross-region-lb address-pool create -g {rg} --lb-name {lb} -n {address_pool} '
'--backend-address name={backend_address1} frontend-ip-address={regional_lb_frontend_ip_address1} ',
checks=self.check('name', self.kwargs['address_pool']))
self.cmd('network cross-region-lb address-pool address add -g {rg} --lb-name {lb} --pool-name {address_pool} --name {backend_address2} --frontend-ip-address {regional_lb_frontend_ip_address2}', checks=self.check('name', self.kwargs['address_pool']))
self.cmd('network cross-region-lb address-pool address remove -g {rg} --lb-name {lb} --pool-name {address_pool} --name {backend_address2}', checks=self.check('name', self.kwargs['address_pool']))
self.cmd('network cross-region-lb address-pool address list -g {rg} --lb-name {lb} --pool-name {address_pool}', checks=self.check('length(@)', 1))
self.cmd('network cross-region-lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
self.cmd('network cross-region-lb address-pool show -g {rg} --lb-name {lb} -n {address_pool}',
checks=self.check('name', self.kwargs['address_pool']))
self.cmd('network cross-region-lb address-pool delete -g {rg} --lb-name {lb} -n {address_pool}',
checks=self.is_empty())
self.cmd('network cross-region-lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 1))
@ResourceGroupPreparer(name_prefix='cli_test_cross_region_lb_probes', location='eastus2')
def test_network_cross_region_lb_probes(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.kwargs['lb2'] = 'lb2'
self.cmd('network cross-region-lb create -g {rg} -n {lb}')
for i in range(1, 4):
self.cmd('network cross-region-lb probe create -g {{rg}} --lb-name {{lb}} -n probe{0} --port {0} --protocol http --path "/test{0}"'.format(i))
self.cmd('network lb probe list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 3))
self.cmd('network cross-region-lb probe update -g {rg} --lb-name {lb} -n probe1 --interval 20 --threshold 5')
self.cmd('network cross-region-lb probe update -g {rg} --lb-name {lb} -n probe2 --protocol tcp --path ""')
self.cmd('network cross-region-lb probe show -g {rg} --lb-name {lb} -n probe1', checks=[
self.check('intervalInSeconds', 20),
self.check('numberOfProbes', 5)
])
# test generic update
self.cmd('network cross-region-lb probe update -g {rg} --lb-name {lb} -n probe1 --set intervalInSeconds=15 --set numberOfProbes=3', checks=[
self.check('intervalInSeconds', 15),
self.check('numberOfProbes', 3)
])
self.cmd('network cross-region-lb probe show -g {rg} --lb-name {lb} -n probe2', checks=[
self.check('protocol', 'Tcp'),
self.check('path', None)
])
self.cmd('network cross-region-lb probe delete -g {rg} --lb-name {lb} -n probe3')
self.cmd('network cross-region-lb probe list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
# test that the cross-region LB supports an https probe
self.cmd('network cross-region-lb create -g {rg} -n {lb2}')
self.cmd('network cross-region-lb probe create -g {rg} --lb-name {lb2} -n probe1 --port 443 --protocol https --path "/test1"')
self.cmd('network cross-region-lb probe list -g {rg} --lb-name {lb2}', checks=self.check('[0].protocol', 'Https'))
@ResourceGroupPreparer(name_prefix='cli_test_cross_region_lb_rules')
def test_network_cross_region_lb_rules(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.cmd('network cross-region-lb create -g {rg} -n {lb}')
self.cmd('network cross-region-lb rule create -g {rg} --lb-name {lb} -n rule2 --frontend-port 60 --backend-port 60 --protocol tcp')
self.cmd('network cross-region-lb address-pool create -g {rg} --lb-name {lb} -n bap1')
self.cmd('network cross-region-lb address-pool create -g {rg} --lb-name {lb} -n bap2')
self.cmd('network cross-region-lb rule create -g {rg} --lb-name {lb} -n rule1 --frontend-ip-name LoadBalancerFrontEnd --frontend-port 40 --backend-pool-name bap1 --backend-port 40 --protocol tcp')
self.cmd('network cross-region-lb rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
self.cmd('network cross-region-lb rule update -g {rg} --lb-name {lb} -n rule1 --floating-ip true --idle-timeout 20 --load-distribution sourceip --protocol udp')
self.cmd('network cross-region-lb rule update -g {rg} --lb-name {lb} -n rule2 --backend-pool-name bap2 --load-distribution sourceipprotocol')
self.cmd('network cross-region-lb rule show -g {rg} --lb-name {lb} -n rule1', checks=[
self.check('enableFloatingIp', True),
self.check('idleTimeoutInMinutes', 20),
self.check('loadDistribution', 'SourceIP'),
self.check('protocol', 'Udp')
])
# test generic update
self.cmd('network cross-region-lb rule update -g {rg} --lb-name {lb} -n rule1 --set idleTimeoutInMinutes=5',
checks=self.check('idleTimeoutInMinutes', 5))
self.cmd('network cross-region-lb rule show -g {rg} --lb-name {lb} -n rule2', checks=[
self.check("backendAddressPool.contains(id, 'bap2')", True),
self.check('loadDistribution', 'SourceIPProtocol')
])
self.cmd('network cross-region-lb rule delete -g {rg} --lb-name {lb} -n rule1')
self.cmd('network cross-region-lb rule delete -g {rg} --lb-name {lb} -n rule2')
self.cmd('network cross-region-lb rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 0))
class NetworkLoadBalancerScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_load_balancer', location='eastus2')
def test_network_lb(self, resource_group):
self.kwargs.update({
'lb': 'lb',
'rt': 'Microsoft.Network/loadBalancers',
'vnet': 'mytestvnet',
'pri_ip': '10.0.0.15',
'pub_ip': 'publicip4'
})
# test lb create with min params (new ip)
self.cmd('network lb create -n {lb}1 -g {rg}', checks=[
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Dynamic'),
self.check('loadBalancer.frontendIPConfigurations[0].resourceGroup', '{rg}')
])
# test internet facing load balancer with new static public IP
self.cmd('network lb create -n {lb}2 -g {rg} --public-ip-address-allocation static --tags foo=doo')
self.cmd('network public-ip show -g {rg} -n PublicIP{lb}2', checks=[
self.check('publicIpAllocationMethod', 'Static'),
self.check('tags.foo', 'doo')
])
# test internal load balancer create (existing subnet ID)
self.kwargs['subnet_id'] = self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name default').get_output_in_json()['newVNet']['subnets'][0]['id']
self.cmd('network lb create -n {lb}3 -g {rg} --subnet {subnet_id} --private-ip-address {pri_ip}', checks=[
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Static'),
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAddress', '{pri_ip}'),
self.check('loadBalancer.frontendIPConfigurations[0].resourceGroup', '{rg}'),
self.check("loadBalancer.frontendIPConfigurations[0].properties.subnet.id", '{subnet_id}')
])
# test internet facing load balancer with existing public IP (by name)
self.cmd('network public-ip create -n {pub_ip} -g {rg}')
self.cmd('network lb create -n {lb}4 -g {rg} --public-ip-address {pub_ip}', checks=[
self.check('loadBalancer.frontendIPConfigurations[0].properties.privateIPAllocationMethod', 'Dynamic'),
self.check('loadBalancer.frontendIPConfigurations[0].resourceGroup', '{rg}'),
self.check("loadBalancer.frontendIPConfigurations[0].properties.publicIPAddress.contains(id, '{pub_ip}')", True)
])
self.cmd('network lb list', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True)
])
self.cmd('network lb list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network lb show --resource-group {rg} --name {lb}1', checks=[
self.check('type(@)', 'object'),
self.check('type', '{rt}'),
self.check('resourceGroup', '{rg}'),
self.check('name', '{lb}1')
])
self.cmd('network lb delete --resource-group {rg} --name {lb}1')
# Expecting 3 results as we just deleted one of the four lbs in the resource group
self.cmd('network lb list --resource-group {rg}',
checks=self.check('length(@)', 3))
class NetworkLoadBalancerIpConfigScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_load_balancer_ip_config', location='eastus2')
def test_network_load_balancer_ip_config(self, resource_group):
for i in range(1, 4): # create 3 public IPs to use for the test
self.cmd('network public-ip create -g {{rg}} -n publicip{}'.format(i))
# create internet-facing LB with public IP (lb1)
self.cmd('network lb create -g {rg} -n lb1 --public-ip-address publicip1')
# create internal LB (lb2)
self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1')
self.cmd('network vnet subnet create -g {rg} --vnet-name vnet1 -n subnet2 --address-prefix 10.0.1.0/24')
self.cmd('network lb create -g {rg} -n lb2 --subnet subnet1 --vnet-name vnet1')
# Test frontend IP configuration for internet-facing LB
self.cmd('network lb frontend-ip create -g {rg} --lb-name lb1 -n ipconfig1 --public-ip-address publicip2')
self.cmd('network lb frontend-ip list -g {rg} --lb-name lb1',
checks=self.check('length(@)', 2))
self.cmd('network lb frontend-ip update -g {rg} --lb-name lb1 -n ipconfig1 --public-ip-address publicip3')
self.cmd('network lb frontend-ip show -g {rg} --lb-name lb1 -n ipconfig1',
checks=self.check("publicIpAddress.contains(id, 'publicip3')", True))
# test generic update
self.kwargs['ip2_id'] = resource_id(subscription=self.get_subscription_id(), resource_group=self.kwargs['rg'], namespace='Microsoft.Network', type='publicIPAddresses', name='publicip2')
self.cmd('network lb frontend-ip update -g {rg} --lb-name lb1 -n ipconfig1 --set publicIpAddress.id="{ip2_id}"',
checks=self.check("publicIpAddress.contains(id, 'publicip2')", True))
self.cmd('network lb frontend-ip delete -g {rg} --lb-name lb1 -n ipconfig1')
self.cmd('network lb frontend-ip list -g {rg} --lb-name lb1',
checks=self.check('length(@)', 1))
# Test frontend IP configuration for internal LB
self.cmd('network lb frontend-ip create -g {rg} --lb-name lb2 -n ipconfig2 --vnet-name vnet1 --subnet subnet1 --private-ip-address 10.0.0.99')
self.cmd('network lb frontend-ip list -g {rg} --lb-name lb2',
checks=self.check('length(@)', 2))
self.cmd('network lb frontend-ip update -g {rg} --lb-name lb2 -n ipconfig2 --subnet subnet2 --vnet-name vnet1 --private-ip-address 10.0.1.100')
self.cmd('network lb frontend-ip show -g {rg} --lb-name lb2 -n ipconfig2',
checks=self.check("subnet.contains(id, 'subnet2')", True))
class NetworkLoadBalancerOutboundRulesScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='test_network_lb_outbound_rules', location='eastus2')
def test_network_load_balancer_outbound_rules(self, resource_group, resource_group_location):
self.kwargs.update({
'loc': resource_group_location,
'lb': 'lb1',
'prefix': 'prefix1',
'frontend1': 'LoadBalancerFrontEnd',
'frontend2': 'prefixFrontEnd',
'backend': 'lb1bepool',
'rule1': 'rule1',
'rule2': 'rule2'
})
self.cmd('network lb create -g {rg} -n {lb} --sku Standard')
self.cmd('network public-ip prefix create -g {rg} -n {prefix} --length 30')
self.cmd('network lb frontend-ip create -g {rg} --lb-name {lb} -n {frontend2} --public-ip-prefix {prefix}')
self.cmd('network lb outbound-rule create -g {rg} --lb-name {lb} -n {rule1} --address-pool {backend} --enable-tcp-reset --frontend-ip-configs {frontend1} --outbound-ports 512 --protocol Tcp', checks=[
self.check('enableTcpReset', True),
self.check('protocol', 'Tcp'),
self.check('allocatedOutboundPorts', 512),
self.check("contains(backendAddressPool.id, '{backend}')", True),
self.check("contains(frontendIpConfigurations[0].id, '{frontend1}')", True)
])
self.cmd('network lb outbound-rule create -g {rg} --lb-name {lb} -n {rule2} --address-pool {backend} --frontend-ip-configs {frontend2} --idle-timeout 20 --protocol all', checks=[
self.check('idleTimeoutInMinutes', 20),
self.check("contains(backendAddressPool.id, '{backend}')", True),
self.check("contains(frontendIpConfigurations[0].id, '{frontend2}')", True)
])
self.cmd('network lb outbound-rule update -g {rg} --lb-name {lb} -n {rule2} --idle-timeout 25',
checks=self.check('idleTimeoutInMinutes', 25))
self.cmd('network lb outbound-rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
self.cmd('network lb outbound-rule show -g {rg} --lb-name {lb} -n {rule1}', checks=[
self.check('enableTcpReset', True),
self.check('protocol', 'Tcp'),
self.check('allocatedOutboundPorts', 512),
self.check("contains(backendAddressPool.id, '{backend}')", True),
self.check("contains(frontendIpConfigurations[0].id, '{frontend1}')", True)
])
self.cmd('network lb outbound-rule delete -g {rg} --lb-name {lb} -n {rule1}')
self.cmd('network lb outbound-rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 1))
class NetworkLoadBalancerSubresourceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_lb_nat_rules', location='eastus2')
def test_network_lb_nat_rules(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.cmd('network lb create -g {rg} -n {lb}')
for count in range(1, 3):
self.cmd('network lb inbound-nat-rule create -g {{rg}} --lb-name {{lb}} -n rule{0} --protocol tcp --frontend-port {0} --backend-port {0} --frontend-ip-name LoadBalancerFrontEnd'.format(count))
self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n rule3 --protocol tcp --frontend-port 3 --backend-port 3')
self.cmd('network lb inbound-nat-rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 3))
self.cmd('network lb inbound-nat-rule update -g {rg} --lb-name {lb} -n rule1 --floating-ip true --idle-timeout 10')
self.cmd('network lb inbound-nat-rule show -g {rg} --lb-name {lb} -n rule1', checks=[
self.check('enableFloatingIp', True),
self.check('idleTimeoutInMinutes', 10)
])
# test generic update
self.cmd('network lb inbound-nat-rule update -g {rg} --lb-name {lb} -n rule1 --set idleTimeoutInMinutes=5',
checks=self.check('idleTimeoutInMinutes', 5))
for count in range(1, 4):
self.cmd('network lb inbound-nat-rule delete -g {{rg}} --lb-name {{lb}} -n rule{}'.format(count))
self.cmd('network lb inbound-nat-rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 0))
@ResourceGroupPreparer(name_prefix='cli_test_lb_nat_pools', location='eastus2')
def test_network_lb_nat_pools(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.cmd('network lb create -g {rg} -n {lb}')
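# create three NAT pools covering frontend port ranges 1000-1999, 2000-2999 and 3000-3999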
for count in range(1000, 4000, 1000):
self.cmd('network lb inbound-nat-pool create -g {{rg}} --lb-name {{lb}} -n rule{0} --protocol tcp --frontend-port-range-start {0} --frontend-port-range-end {1} --backend-port {0}'.format(count, count + 999))
self.cmd('network lb inbound-nat-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 3))
self.cmd('network lb inbound-nat-pool update -g {rg} --lb-name {lb} -n rule1000 --protocol udp --backend-port 50 --floating-ip --idle-timeout 20')
self.cmd('network lb inbound-nat-pool show -g {rg} --lb-name {lb} -n rule1000', checks=[
self.check('protocol', 'Udp'),
self.check('backendPort', 50),
self.check('enableFloatingIp', True),
self.check('idleTimeoutInMinutes', 20)
])
# test generic update
self.cmd('network lb inbound-nat-pool update -g {rg} --lb-name {lb} -n rule1000 --set protocol=Tcp',
checks=self.check('protocol', 'Tcp'))
for count in range(1000, 4000, 1000):
self.cmd('network lb inbound-nat-pool delete -g {{rg}} --lb-name {{lb}} -n rule{}'.format(count))
self.cmd('network lb inbound-nat-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 0))
@ResourceGroupPreparer(name_prefix='cli_test_lb_address_pool', location='eastus2')
def test_network_lb_address_pool(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.cmd('network lb create -g {rg} -n {lb}')
for i in range(1, 4):
self.cmd('network lb address-pool create -g {{rg}} --lb-name {{lb}} -n bap{}'.format(i),
checks=self.check('name', 'bap{}'.format(i)))
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 4))
self.cmd('network lb address-pool show -g {rg} --lb-name {lb} -n bap1',
checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool delete -g {rg} --lb-name {lb} -n bap1',
checks=self.is_empty())
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 3))
@ResourceGroupPreparer(name_prefix='cli_test_lb_address_pool_addresses', location='eastus2')
def test_network_lb_address_pool_addresses(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'vnet': 'clitestvnet',
'nic': 'clitestnic',
'rg': resource_group,
'lb_address_pool_file_path': os.path.join(TEST_DIR, 'test-address-pool-config.json')
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name subnet1')
self.cmd('network nic create -g {rg} -n {nic} --subnet subnet1 --vnet-name {vnet}')
self.cmd('network lb create -g {rg} -n {lb} --sku Standard')
with self.assertRaisesRegexp(CLIError, 'Each backend address must have name, vnet and ip-address information.'):
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1 --vnet {vnet} --backend-address name=addr2')
with self.assertRaisesRegexp(CLIError, 'Each backend address must have name, vnet and ip-address information.'):
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1 --backend-address name=addr2 ip-address=10.0.0.3')
with self.assertRaisesRegexp(CLIError, 'Each backend address must have name, vnet and ip-address information.'):
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1 --vnet {vnet} --backend-address ip-address=10.0.0.3')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1 --vnet {vnet} '
'--backend-address name=addr1 ip-address=10.0.0.1 '
'--backend-address name=addr2 ip-address=10.0.0.2 '
'--backend-address name=addr3 ip-address=10.0.0.3',
checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool address add -g {rg} --lb-name {lb} --pool-name bap1 --name addr6 --vnet {vnet} --ip-address 10.0.0.6', checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool address remove -g {rg} --lb-name {lb} --pool-name bap1 --name addr2', checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool address list -g {rg} --lb-name {lb} --pool-name bap1', checks=self.check('length(@)', '3'))
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
self.cmd('network lb address-pool show -g {rg} --lb-name {lb} -n bap1',
checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool delete -g {rg} --lb-name {lb} -n bap1',
checks=self.is_empty())
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 1))
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1 --vnet {vnet} '
'--backend-addresses-config-file @"{lb_address_pool_file_path}"',
checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool address list -g {rg} --lb-name {lb} --pool-name bap1', checks=self.check('length(@)', '2'))
self.cmd('network lb address-pool delete -g {rg} --lb-name {lb} -n bap1', checks=self.is_empty())
self.cmd('network lb address-pool list -g {rg} --lb-name {lb}', checks=self.check('length(@)', 1))
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1 --vnet {vnet}', checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool address add -g {rg} --lb-name {lb} --pool-name bap1 --name addr6 --vnet {vnet} --ip-address 10.0.0.6', checks=self.check('name', 'bap1'))
self.cmd('network lb address-pool address list -g {rg} --lb-name {lb} --pool-name bap1', checks=self.check('length(@)', '1'))
@ResourceGroupPreparer(name_prefix='cli_test_lb_probes', location='eastus2')
def test_network_lb_probes(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.kwargs['lb2'] = 'lb2'
self.cmd('network lb create -g {rg} -n {lb}')
for i in range(1, 4):
self.cmd('network lb probe create -g {{rg}} --lb-name {{lb}} -n probe{0} --port {0} --protocol http --path "/test{0}"'.format(i))
self.cmd('network lb probe list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 3))
self.cmd('network lb probe update -g {rg} --lb-name {lb} -n probe1 --interval 20 --threshold 5')
self.cmd('network lb probe update -g {rg} --lb-name {lb} -n probe2 --protocol tcp --path ""')
self.cmd('network lb probe show -g {rg} --lb-name {lb} -n probe1', checks=[
self.check('intervalInSeconds', 20),
self.check('numberOfProbes', 5)
])
# test generic update
self.cmd('network lb probe update -g {rg} --lb-name {lb} -n probe1 --set intervalInSeconds=15 --set numberOfProbes=3', checks=[
self.check('intervalInSeconds', 15),
self.check('numberOfProbes', 3)
])
self.cmd('network lb probe show -g {rg} --lb-name {lb} -n probe2', checks=[
self.check('protocol', 'Tcp'),
self.check('path', None)
])
self.cmd('network lb probe delete -g {rg} --lb-name {lb} -n probe3')
self.cmd('network lb probe list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
# test standard LB supports https probe
self.cmd('network lb create -g {rg} -n {lb2} --sku standard')
self.cmd('network lb probe create -g {rg} --lb-name {lb2} -n probe1 --port 443 --protocol https --path "/test1"')
self.cmd('network lb probe list -g {rg} --lb-name {lb2}', checks=self.check('[0].protocol', 'Https'))
@ResourceGroupPreparer(name_prefix='cli_test_lb_rules', location='eastus2')
def test_network_lb_rules(self, resource_group):
self.kwargs['lb'] = 'lb1'
self.cmd('network lb create -g {rg} -n {lb}')
self.cmd('network lb rule create -g {rg} --lb-name {lb} -n rule2 --frontend-port 60 --backend-port 60 --protocol tcp')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap2')
self.cmd('network lb rule create -g {rg} --lb-name {lb} -n rule1 --frontend-ip-name LoadBalancerFrontEnd --frontend-port 40 --backend-pool-name bap1 --backend-port 40 --protocol tcp')
self.cmd('network lb rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 2))
self.cmd('network lb rule update -g {rg} --lb-name {lb} -n rule1 --floating-ip true --idle-timeout 20 --load-distribution sourceip --protocol udp')
self.cmd('network lb rule update -g {rg} --lb-name {lb} -n rule2 --backend-pool-name bap2 --load-distribution sourceipprotocol')
self.cmd('network lb rule show -g {rg} --lb-name {lb} -n rule1', checks=[
self.check('enableFloatingIp', True),
self.check('idleTimeoutInMinutes', 20),
self.check('loadDistribution', 'SourceIP'),
self.check('protocol', 'Udp')
])
# test generic update
self.cmd('network lb rule update -g {rg} --lb-name {lb} -n rule1 --set idleTimeoutInMinutes=5',
checks=self.check('idleTimeoutInMinutes', 5))
self.cmd('network lb rule show -g {rg} --lb-name {lb} -n rule2', checks=[
self.check("backendAddressPool.contains(id, 'bap2')", True),
self.check('loadDistribution', 'SourceIPProtocol')
])
self.cmd('network lb rule delete -g {rg} --lb-name {lb} -n rule1')
self.cmd('network lb rule delete -g {rg} --lb-name {lb} -n rule2')
self.cmd('network lb rule list -g {rg} --lb-name {lb}',
checks=self.check('length(@)', 0))
class NetworkLocalGatewayScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='local_gateway_scenario')
def test_network_local_gateway(self, resource_group):
self.kwargs.update({
'lgw1': 'lgw1',
'lgw2': 'lgw2',
'rt': 'Microsoft.Network/localNetworkGateways'
})
self.cmd('network local-gateway create --resource-group {rg} --name {lgw1} --gateway-ip-address 10.1.1.1 --tags foo=doo')
self.cmd('network local-gateway update --resource-group {rg} --name {lgw1} --tags foo=boo',
checks=self.check('tags.foo', 'boo'))
self.cmd('network local-gateway show --resource-group {rg} --name {lgw1}', checks=[
self.check('type', '{rt}'),
self.check('resourceGroup', '{rg}'),
self.check('name', '{lgw1}')])
self.cmd('network local-gateway create --resource-group {rg} --name {lgw2} --gateway-ip-address 10.1.1.2 --local-address-prefixes 10.0.1.0/24',
checks=self.check('localNetworkAddressSpace.addressPrefixes[0]', '10.0.1.0/24'))
self.cmd('network local-gateway list --resource-group {rg}',
checks=self.check('length(@)', 2))
self.cmd('network local-gateway delete --resource-group {rg} --name {lgw1}')
self.cmd('network local-gateway list --resource-group {rg}',
checks=self.check('length(@)', 1))
class NetworkNicScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_nic_scenario', location='eastus2')
def test_network_nic(self, resource_group):
self.kwargs.update({
'nic': 'cli-test-nic',
'rt': 'Microsoft.Network/networkInterfaces',
'subnet': 'mysubnet',
'vnet': 'myvnet',
'nsg1': 'mynsg',
'nsg2': 'myothernsg',
'lb': 'mylb',
'pri_ip': '10.0.0.15',
'pub_ip': 'publicip1'
})
self.kwargs['subnet_id'] = self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}').get_output_in_json()['newVNet']['subnets'][0]['id']
self.cmd('network nsg create -g {rg} -n {nsg1}')
self.kwargs['nsg_id'] = self.cmd('network nsg show -g {rg} -n {nsg1}').get_output_in_json()['id']
self.cmd('network nsg create -g {rg} -n {nsg2}')
self.cmd('network public-ip create -g {rg} -n {pub_ip}')
self.kwargs['pub_ip_id'] = self.cmd('network public-ip show -g {rg} -n {pub_ip}').get_output_in_json()['id']
self.cmd('network lb create -g {rg} -n {lb}')
self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n rule1 --protocol tcp --frontend-port 100 --backend-port 100 --frontend-ip-name LoadBalancerFrontEnd')
self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n rule2 --protocol tcp --frontend-port 200 --backend-port 200 --frontend-ip-name LoadBalancerFrontEnd')
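# collect both NAT rule IDs so they can be passed to 'nic create' below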
self.kwargs['rule_ids'] = ' '.join(self.cmd('network lb inbound-nat-rule list -g {rg} --lb-name {lb} --query "[].id"').get_output_in_json())
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap2')
self.kwargs['address_pool_ids'] = ' '.join(self.cmd('network lb address-pool list -g {rg} --lb-name {lb} --query "[].id"').get_output_in_json())
# create with minimum parameters
self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet} --vnet-name {vnet}', checks=[
self.check('NewNIC.ipConfigurations[0].privateIpAllocationMethod', 'Dynamic'),
self.check('NewNIC.provisioningState', 'Succeeded')
])
# exercise optional parameters
self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet_id} --ip-forwarding --private-ip-address {pri_ip} --public-ip-address {pub_ip} --internal-dns-name test --dns-servers 100.1.2.3 --lb-address-pools {address_pool_ids} --lb-inbound-nat-rules {rule_ids} --accelerated-networking --tags foo=doo', checks=[
self.check('NewNIC.ipConfigurations[0].privateIpAllocationMethod', 'Static'),
self.check('NewNIC.ipConfigurations[0].privateIpAddress', '{pri_ip}'),
self.check('NewNIC.enableIpForwarding', True),
self.check('NewNIC.enableAcceleratedNetworking', True),
self.check('NewNIC.provisioningState', 'Succeeded'),
self.check('NewNIC.dnsSettings.internalDnsNameLabel', 'test'),
self.check('length(NewNIC.dnsSettings.dnsServers)', 1)
])
# exercise creating with NSG
self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet} --vnet-name {vnet} --network-security-group {nsg1}', checks=[
self.check('NewNIC.ipConfigurations[0].privateIpAllocationMethod', 'Dynamic'),
self.check('NewNIC.enableIpForwarding', False),
self.check("NewNIC.networkSecurityGroup.contains(id, '{nsg1}')", True),
self.check('NewNIC.provisioningState', 'Succeeded')
])
# exercise creating with NSG and Public IP
self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet} --vnet-name {vnet} --network-security-group {nsg_id} --public-ip-address {pub_ip_id}', checks=[
self.check('NewNIC.ipConfigurations[0].privateIpAllocationMethod', 'Dynamic'),
self.check('NewNIC.enableIpForwarding', False),
self.check("NewNIC.networkSecurityGroup.contains(id, '{nsg1}')", True),
self.check('NewNIC.provisioningState', 'Succeeded')
])
self.cmd('network nic list', checks=[
self.check('type(@)', 'array'),
self.check("length([?contains(id, 'networkInterfaces')]) == length(@)", True)
])
self.cmd('network nic list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network nic show --resource-group {rg} --name {nic}', checks=[
self.check('type(@)', 'object'),
self.check('type', '{rt}'),
self.check('resourceGroup', '{rg}'),
self.check('name', '{nic}')
])
self.cmd('network nic update -g {rg} -n {nic} --internal-dns-name noodle --ip-forwarding true --accelerated-networking false --dns-servers "" --network-security-group {nsg2}', checks=[
self.check('enableIpForwarding', True),
self.check('enableAcceleratedNetworking', False),
self.check('dnsSettings.internalDnsNameLabel', 'noodle'),
self.check('length(dnsSettings.dnsServers)', 0),
self.check("networkSecurityGroup.contains(id, '{nsg2}')", True)
])
# test generic update
self.cmd('network nic update -g {rg} -n {nic} --set dnsSettings.internalDnsNameLabel=doodle --set enableIpForwarding=false', checks=[
self.check('enableIpForwarding', False),
self.check('dnsSettings.internalDnsNameLabel', 'doodle')
])
self.cmd('network nic delete --resource-group {rg} --name {nic}')
self.cmd('network nic list -g {rg}', checks=self.is_empty())
class NetworkNicAppGatewayScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_nic_app_gateway', location='eastus2')
def test_network_nic_app_gateway(self, resource_group):
from azure.core.exceptions import HttpResponseError
import json
self.kwargs.update({
'nic': 'nic1',
'ag': 'ag1',
'vnet': 'vnet1',
'subnet1': 'subnet1',
'subnet2': 'subnet2',
'ip': 'ip1',
'lb': 'lb1',
'bap': 'bap1',
'pool1': 'appGatewayBackendPool',
'pool2': 'bepool2',
'config1': 'ipconfig1',
'config2': 'ipconfig2'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet1} --defer')
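# the subnet create below runs without --defer, which flushes the locally cached
# vnet to Azure (the same write-through behavior exercised in
# NetworkVNetCachingScenarioTest further down)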
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet2} --address-prefix 10.0.1.0/24')
self.cmd('network application-gateway create -g {rg} -n {ag} --vnet-name {vnet} --subnet {subnet1} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists --timeout 120')
self.kwargs['ipaddress'] = json.dumps(
{
"ip_address": "10.20.0.69"
}
)
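# generic-update --add appends a JSON object to the named list property; the
# payload is pre-serialized with json.dumps so it survives kwargs substitution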
self.cmd("network application-gateway address-pool update -g {rg} --gateway-name {ag} -n {pool1} --add backendAddresses \'{ipaddres}\'", checks=[
self.check('length(backendAddresses)', 1)
])
self.kwargs['ipaddress'] = json.dumps(
{
"ip_address": "10.20.0.70"
}
)
self.cmd("network application-gateway address-pool update -g {rg} --gateway-name {ag} -n {pool1} --add backendAddresses \'{ipaddres}\'", checks=[
self.check('length(backendAddresses)', 2)
])
self.cmd('network application-gateway address-pool create -g {rg} --gateway-name {ag} -n {pool2} --no-wait')
self.cmd('network lb create -g {rg} -n {lb}')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n {bap}')
self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet2} --vnet-name {vnet} --gateway-name {ag} --app-gateway-address-pools {pool1}',
checks=self.check('length(NewNIC.ipConfigurations[0].applicationGatewayBackendAddressPools)', 1))
with self.assertRaisesRegex(HttpResponseError, 'not supported for secondary IpConfigurations'):
self.cmd('network nic ip-config create -g {rg} --nic-name {nic} -n {config2} --subnet {subnet2} --vnet-name {vnet} --gateway-name {ag} --app-gateway-address-pools {pool2}')
self.cmd('network nic ip-config update -g {rg} --nic-name {nic} -n {config1} --gateway-name {ag} --app-gateway-address-pools {pool1} {pool2}',
checks=self.check('length(applicationGatewayBackendAddressPools)', 2))
self.cmd('network nic ip-config address-pool add -g {rg} --nic-name {nic} --lb-name {lb} --address-pool {bap} --ip-config-name {config1}')
class NetworkNicSubresourceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_nic_subresource')
def test_network_nic_subresources(self, resource_group):
self.kwargs['nic'] = 'nic1'
self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1')
self.cmd('network nic create -g {rg} -n {nic} --subnet subnet1 --vnet-name vnet1')
self.cmd('network nic ip-config list -g {rg} --nic-name {nic}',
checks=self.check('length(@)', 1))
self.cmd('network nic ip-config show -g {rg} --nic-name {nic} -n ipconfig1', checks=[
self.check('name', 'ipconfig1'),
self.check('privateIpAllocationMethod', 'Dynamic')
])
self.cmd('network nic ip-config create -g {rg} --nic-name {nic} -n ipconfig2 --make-primary',
checks=self.check('primary', True))
self.cmd('network nic ip-config update -g {rg} --nic-name {nic} -n ipconfig1 --make-primary',
checks=self.check('primary', True))
self.cmd('network nic ip-config delete -g {rg} --nic-name {nic} -n ipconfig2')
# test various sets
self.kwargs.update({
'vnet': 'vnet2',
'subnet': 'subnet2',
'ip': 'publicip2',
'lb': 'lb1',
'config': 'ipconfig1'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}')
self.cmd('network public-ip create -g {rg} -n {ip}')
self.kwargs['ip_id'] = self.cmd('network public-ip show -g {rg} -n {ip}').get_output_in_json()['id']
self.cmd('network lb create -g {rg} -n {lb}')
self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n rule1 --protocol tcp --frontend-port 100 --backend-port 100 --frontend-ip-name LoadBalancerFrontEnd')
self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n rule2 --protocol tcp --frontend-port 200 --backend-port 200 --frontend-ip-name LoadBalancerFrontEnd')
self.kwargs['rule1_id'] = self.cmd('network lb inbound-nat-rule show -g {rg} --lb-name {lb} -n rule1').get_output_in_json()['id']
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap1')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n bap2')
self.kwargs['bap1_id'] = self.cmd('network lb address-pool show -g {rg} --lb-name {lb} -n bap1').get_output_in_json()['id']
self.kwargs['private_ip'] = '10.0.0.15'
# test ability to set load balancer IDs
# includes the default backend pool
self.cmd('network nic ip-config update -g {rg} --nic-name {nic} -n {config} --lb-name {lb} --lb-address-pools {bap1_id} bap2 --lb-inbound-nat-rules {rule1_id} rule2 --private-ip-address {private_ip}', checks=[
self.check('length(loadBalancerBackendAddressPools)', 2),
self.check('length(loadBalancerInboundNatRules)', 2),
self.check('privateIpAddress', '{private_ip}'),
self.check('privateIpAllocationMethod', 'Static')])
# test generic update
self.cmd('network nic ip-config update -g {rg} --nic-name {nic} -n {config} --set privateIpAddress=10.0.0.50',
checks=self.check('privateIpAddress', '10.0.0.50'))
# test ability to add and remove IDs one at a time with subcommands
self.cmd('network nic ip-config inbound-nat-rule remove -g {rg} --lb-name {lb} --nic-name {nic} --ip-config-name {config} --inbound-nat-rule rule1',
checks=self.check('length(loadBalancerInboundNatRules)', 1))
self.cmd('network nic ip-config inbound-nat-rule add -g {rg} --lb-name {lb} --nic-name {nic} --ip-config-name {config} --inbound-nat-rule rule1',
checks=self.check('length(loadBalancerInboundNatRules)', 2))
self.cmd('network nic ip-config address-pool remove -g {rg} --lb-name {lb} --nic-name {nic} --ip-config-name {config} --address-pool bap1',
checks=self.check('length(loadBalancerBackendAddressPools)', 1))
self.cmd('network nic ip-config address-pool add -g {rg} --lb-name {lb} --nic-name {nic} --ip-config-name {config} --address-pool bap1',
checks=self.check('length(loadBalancerBackendAddressPools)', 2))
self.cmd('network nic ip-config update -g {rg} --nic-name {nic} -n {config} --private-ip-address "" --public-ip-address {ip_id}', checks=[
self.check('privateIpAllocationMethod', 'Dynamic'),
self.check("publicIpAddress.contains(id, '{ip_id}')", True)
])
self.cmd('network nic ip-config update -g {rg} --nic-name {nic} -n {config} --subnet {subnet} --vnet-name {vnet}',
checks=self.check("subnet.contains(id, '{subnet}')", True))
@ResourceGroupPreparer(name_prefix='cli_test_nic_lb_address_pools', location='eastus2')
def test_network_nic_lb_address_pools(self, resource_group):
self.kwargs.update({
'nic': 'nic1',
'vnet': 'vnet1',
'subnet': 'subnet1',
'config': 'ipconfig1',
'lb': 'lb1',
'pool': 'pool1'
})
self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1')
self.cmd('network nic create -g {rg} -n {nic} --subnet subnet1 --vnet-name vnet1')
self.cmd('network lb create -g {rg} -n {lb}')
self.cmd('network lb address-pool create -g {rg} --lb-name {lb} -n {pool}')
self.kwargs['lb_pool_id'] = self.cmd('network lb address-pool show -g {rg} --lb-name {lb} -n {pool}').get_output_in_json()['id']
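# exercise both parameter styles: pool name plus --lb-name, and a bare resource ID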
self.cmd('network nic ip-config address-pool add -g {rg} --lb-name {lb} --nic-name {nic} --ip-config-name {config} --address-pool {pool}',
checks=self.check('length(loadBalancerBackendAddressPools)', 1))
self.cmd('network nic ip-config address-pool remove -g {rg} --lb-name {lb} --nic-name {nic} --ip-config-name {config} --address-pool {pool}',
checks=self.check('loadBalancerBackendAddressPools', None))
self.cmd('network nic ip-config address-pool add -g {rg} --nic-name {nic} --ip-config-name {config} --address-pool {lb_pool_id}',
checks=self.check('length(loadBalancerBackendAddressPools)', 1))
self.cmd('network nic ip-config address-pool remove -g {rg} --nic-name {nic} --ip-config-name {config} --address-pool {lb_pool_id}',
checks=self.check('loadBalancerBackendAddressPools', None))
@ResourceGroupPreparer(name_prefix='cli_test_nic_ag_address_pools')
def test_network_nic_ag_address_pools(self, resource_group):
self.kwargs.update({
'nic': 'nic1',
'vnet': 'vnet1',
'subnet1': 'subnet1',
'subnet2': 'subnet2',
'config': 'ipconfig1',
'ag': 'ag1',
'pool': 'pool1'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet1} --defer')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet2} --address-prefix 10.0.1.0/24')
self.cmd('network application-gateway create -g {rg} -n {ag} --vnet-name {vnet} --subnet {subnet1} --no-wait')
self.cmd('network application-gateway wait -g {rg} -n {ag} --exists --timeout 120')
self.cmd('network application-gateway address-pool create -g {rg} --gateway-name {ag} -n {pool} --no-wait')
self.kwargs['ag_pool_id'] = self.cmd('network application-gateway address-pool show -g {rg} --gateway-name {ag} -n {pool}').get_output_in_json()['id']
self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet2} --vnet-name {vnet}')
self.cmd('network nic ip-config address-pool add -g {rg} --gateway-name {ag} --nic-name {nic} --ip-config-name {config} --address-pool {pool}',
checks=self.check('length(applicationGatewayBackendAddressPools)', 1))
self.cmd('network nic ip-config address-pool remove -g {rg} --gateway-name {ag} --nic-name {nic} --ip-config-name {config} --address-pool {pool}',
checks=self.check('applicationGatewayBackendAddressPools', None))
self.cmd('network nic ip-config address-pool add -g {rg} --nic-name {nic} --ip-config-name {config} --address-pool {ag_pool_id}',
checks=self.check('length(applicationGatewayBackendAddressPools)', 1))
self.cmd('network nic ip-config address-pool remove -g {rg} --nic-name {nic} --ip-config-name {config} --address-pool {ag_pool_id}',
checks=self.check('applicationGatewayBackendAddressPools', None))
class NetworkNicConvenienceCommandsScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_nic_convenience_test')
def test_network_nic_convenience_commands(self, resource_group):
self.kwargs['vm'] = 'conveniencevm1'
self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --admin-username myusername --admin-password aBcD1234!@#$ --authentication-type password --nsg-rule None')
self.kwargs['nic_id'] = self.cmd('vm show -g {rg} -n {vm} --query "networkProfile.networkInterfaces[0].id"').get_output_in_json()
self.cmd('network nic list-effective-nsg --ids {nic_id}',
checks=self.greater_than('length(@)', 0))
self.cmd('network nic show-effective-route-table --ids {nic_id}',
checks=self.greater_than('length(@)', 0))
class NetworkExtendedNSGScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_extended_nsg')
def test_network_extended_nsg(self, resource_group):
self.kwargs.update({
'nsg': 'nsg1',
'rule': 'rule1'
})
self.cmd('network nsg create --name {nsg} -g {rg}')
self.cmd('network nsg rule create --access allow --destination-address-prefixes 10.0.0.0/24 11.0.0.0/24 --direction inbound --nsg-name {nsg} --protocol * -g {rg} --source-address-prefix * -n {rule} --source-port-range 700-900 1000-1100 --destination-port-range 4444 --priority 1000', checks=[
self.check('length(destinationAddressPrefixes)', 2),
self.check('destinationAddressPrefix', ''),
self.check('length(sourceAddressPrefixes)', 0),
self.check('sourceAddressPrefix', '*'),
self.check('length(sourcePortRanges)', 2),
self.check('sourcePortRange', None),
self.check('length(destinationPortRanges)', 0),
self.check('destinationPortRange', '4444')
])
self.cmd('network nsg rule update --destination-address-prefixes Internet --nsg-name {nsg} -g {rg} --source-address-prefix 10.0.0.0/24 11.0.0.0/24 -n {rule} --source-port-range * --destination-port-range 500-1000 2000 3000', checks=[
self.check('length(destinationAddressPrefixes)', 0),
self.check('destinationAddressPrefix', 'Internet'),
self.check('length(sourceAddressPrefixes)', 2),
self.check('sourceAddressPrefix', ''),
self.check('length(sourcePortRanges)', 0),
self.check('sourcePortRange', '*'),
self.check('length(destinationPortRanges)', 3),
self.check('destinationPortRange', None)
])
class NetworkSecurityGroupScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_nsg')
def test_network_nsg(self, resource_group):
self.kwargs.update({
'nsg': 'test-nsg1',
'rule': 'web',
'rt': 'Microsoft.Network/networkSecurityGroups'
})
self.cmd('network nsg create --name {nsg} -g {rg} --tags foo=doo')
self.cmd('network nsg rule create --access allow --destination-address-prefix 1234 --direction inbound --nsg-name {nsg} --protocol * -g {rg} --source-address-prefix 789 -n {rule} --source-port-range * --destination-port-range 4444 --priority 1000')
self.cmd('network nsg list', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True)
])
self.cmd('network nsg list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network nsg show --resource-group {rg} --name {nsg}', checks=[
self.check('type(@)', 'object'),
self.check('type', '{rt}'),
self.check('resourceGroup', '{rg}'),
self.check('name', '{nsg}')
])
# Test for the manually added nsg rule
self.cmd('network nsg rule list --resource-group {rg} --nsg-name {nsg}', checks=[
self.check('type(@)', 'array'),
self.check('length(@)', 1),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
self.cmd('network nsg rule show --resource-group {rg} --nsg-name {nsg} --name {rule}', checks=[
self.check('type(@)', 'object'),
self.check('resourceGroup', '{rg}'),
self.check('name', '{rule}')
])
self.kwargs.update({
'access': 'DENY',
'prefix': '111',
'dir': 'Outbound',
'protocol': 'Tcp',
'ports': '1234-1235',
'desc': 'greatrule',
'priority': 888
})
self.cmd('network nsg rule update -g {rg} --nsg-name {nsg} -n {rule} --direction {dir} --access {access} --destination-address-prefix {prefix} --protocol {protocol} --source-address-prefix {prefix} --source-port-range {ports} --destination-port-range {ports} --priority {priority} --description {desc}', checks=[
self.check('access', 'Deny'),
self.check('direction', '{dir}'),
self.check('destinationAddressPrefix', '{prefix}'),
self.check('protocol', '{protocol}'),
self.check('sourceAddressPrefix', '{prefix}'),
self.check('sourcePortRange', '{ports}'),
self.check('priority', '{priority}'),
self.check('description', '{desc}')
])
# test generic update
self.cmd('network nsg rule update -g {rg} --nsg-name {nsg} -n {rule} --set description="cool"',
checks=self.check('description', 'cool'))
self.cmd('network nsg rule delete --resource-group {rg} --nsg-name {nsg} --name {rule}')
# Delete the network security group
self.cmd('network nsg delete --resource-group {rg} --name {nsg}')
# Expecting no results as we just deleted the only security group in the resource group
self.cmd('network nsg list --resource-group {rg}', checks=self.is_empty())
class NetworkRouteTableOperationScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_route_table')
def test_network_route_table_operation(self, resource_group):
self.kwargs.update({
'table': 'cli-test-route-table',
'route': 'my-route',
'rt': 'Microsoft.Network/routeTables'
})
self.cmd('network route-table create -n {table} -g {rg} --tags foo=doo',
checks=self.check('tags.foo', 'doo'))
self.cmd('network route-table update -n {table} -g {rg} --tags foo=boo --disable-bgp-route-propagation', checks=[
self.check('tags.foo', 'boo')
])
self.cmd('network route-table route create --address-prefix 10.0.5.0/24 -n {route} -g {rg} --next-hop-type None --route-table-name {table}')
self.cmd('network route-table list',
checks=self.check('type(@)', 'array'))
self.cmd('network route-table list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check('length(@)', 1),
self.check('[0].name', '{table}'),
self.check('[0].type', '{rt}')
])
self.cmd('network route-table show --resource-group {rg} --name {table}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{table}'),
self.check('type', '{rt}')
])
self.cmd('network route-table route list --resource-group {rg} --route-table-name {table}',
checks=self.check('type(@)', 'array'))
self.cmd('network route-table route show --resource-group {rg} --route-table-name {table} --name {route}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{route}'),
])
self.cmd('network route-table route delete --resource-group {rg} --route-table-name {table} --name {route}')
self.cmd('network route-table route list --resource-group {rg} --route-table-name {table}', checks=self.is_empty())
self.cmd('network route-table delete --resource-group {rg} --name {table}')
self.cmd('network route-table list --resource-group {rg}', checks=self.is_empty())
class NetworkVNetScenarioTest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_vnet_test')
def test_network_vnet(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'subnet': 'subnet1',
'rt': 'Microsoft.Network/virtualNetworks'
})
self.cmd('network vnet create --resource-group {rg} --name {vnet} --subnet-name default', checks=[
self.check('newVNet.provisioningState', 'Succeeded'),
self.check('newVNet.addressSpace.addressPrefixes[0]', '10.0.0.0/16')
])
self.cmd('network vnet check-ip-address -g {rg} -n {vnet} --ip-address 10.0.0.50',
checks=self.check('available', True))
self.cmd('network vnet check-ip-address -g {rg} -n {vnet} --ip-address 10.0.0.0',
checks=self.check('available', False))
self.cmd('network vnet list', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True)
])
self.cmd('network vnet list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check("length([?type == '{rt}']) == length(@)", True),
])
self.cmd('network vnet show --resource-group {rg} --name {vnet}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{vnet}'),
self.check('type', '{rt}')
])
self.kwargs['prefixes'] = '20.0.0.0/16 10.0.0.0/16'
self.cmd('network vnet update --resource-group {rg} --name {vnet} --address-prefixes {prefixes} --dns-servers 1.2.3.4', checks=[
self.check('length(addressSpace.addressPrefixes)', 2),
self.check('dhcpOptions.dnsServers[0]', '1.2.3.4')
])
self.cmd('network vnet update -g {rg} -n {vnet} --dns-servers ""', checks=[
self.check('length(addressSpace.addressPrefixes)', 2),
self.check('dhcpOptions.dnsServers', [])
])
# test generic update
self.cmd('network vnet update --resource-group {rg} --name {vnet} --set addressSpace.addressPrefixes[0]="20.0.0.0/24"',
checks=self.check('addressSpace.addressPrefixes[0]', '20.0.0.0/24'))
self.cmd('network vnet subnet create --resource-group {rg} --vnet-name {vnet} --name {subnet} --address-prefix 20.0.0.0/24')
self.cmd('network vnet subnet list --resource-group {rg} --vnet-name {vnet}',
checks=self.check('type(@)', 'array'))
self.cmd('network vnet subnet show --resource-group {rg} --vnet-name {vnet} --name {subnet}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{subnet}'),
])
self.cmd('network vnet subnet delete --resource-group {rg} --vnet-name {vnet} --name {subnet}')
self.cmd('network vnet subnet list --resource-group {rg} --vnet-name {vnet}',
checks=self.check("length([?name == '{subnet}'])", 0))
self.cmd('network vnet list --resource-group {rg}',
checks=self.check("length([?name == '{vnet}'])", 1))
self.cmd('network vnet delete --resource-group {rg} --name {vnet}')
self.cmd('network vnet list --resource-group {rg}', checks=self.is_empty())
@ResourceGroupPreparer(name_prefix='cli_vnet_with_subnet_nsg_test')
def test_network_vnet_with_subnet_nsg(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'subnet': 'subnet1',
'nsg': 'nsg',
'rt': 'Microsoft.Network/virtualNetworks',
'prefixes': '20.0.0.0/16 10.0.0.0/16'
})
result = self.cmd('network nsg create --resource-group {rg} --name {nsg}').get_output_in_json()
self.kwargs['nsg_id'] = result['NewNSG']['id']
self.cmd('network vnet create --resource-group {rg} --name {vnet} --address-prefixes {prefixes} '
'--subnet-name {subnet} --subnet-prefixes 20.0.0.0/24 --nsg {nsg}')
self.cmd('network vnet subnet list --resource-group {rg} --vnet-name {vnet}',
checks=self.check('type(@)', 'array'))
self.cmd('network vnet subnet show --resource-group {rg} --vnet-name {vnet} --name {subnet}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{subnet}'),
self.check('networkSecurityGroup.id', '{nsg_id}')
])
self.cmd('network vnet subnet delete --resource-group {rg} --vnet-name {vnet} --name {subnet}')
@ResourceGroupPreparer(name_prefix='cli_vnet_test')
def test_network_vnet_list_available_ips(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'subnet': 'subnet1',
'rt': 'Microsoft.Network/virtualNetworks',
'rg': resource_group
})
self.cmd('network vnet create --resource-group {rg} --name {vnet} --subnet-name default', checks=[
self.check('newVNet.provisioningState', 'Succeeded'),
self.check('newVNet.addressSpace.addressPrefixes[0]', '10.0.0.0/16')
])
self.kwargs['prefixes'] = '20.0.0.0/16 10.0.0.0/16'
self.cmd('network vnet update --resource-group {rg} --name {vnet} --address-prefixes {prefixes} --dns-servers 1.2.3.4', checks=[
self.check('length(addressSpace.addressPrefixes)', 2),
self.check('dhcpOptions.dnsServers[0]', '1.2.3.4')
])
self.cmd('network vnet subnet create --resource-group {rg} --vnet-name {vnet} --name {subnet} --address-prefix 20.0.0.0/24')
self.cmd('network vnet list-available-ips -g {rg} --name {vnet}', checks=[
self.check('length(@)', 5)
])
class NetworkVNetCachingScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_vnet_cache_test')
def test_network_vnet_caching(self, resource_group):
from time import sleep
self.kwargs.update({
'vnet': 'vnet1'
})
# test that custom commands work with caching
self.cmd('network vnet create -g {rg} -n {vnet} --address-prefix 10.0.0.0/16 --defer')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n subnet1 --address-prefix 10.0.0.0/24 --defer')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n subnet2 --address-prefix 10.0.1.0/24 --defer')
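# the CLI exits with code 3 when a resource is not found; everything above was
# deferred, so nothing should exist server-side yet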
with self.assertRaisesRegex(SystemExit, '3'):
# ensure vnet has not been created
self.cmd('network vnet show -g {rg} -n {vnet}')
self.cmd('cache show -g {rg} -n {vnet} -t VirtualNetwork')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n subnet3 --address-prefix 10.0.2.0/24')
self.cmd('network vnet show -g {rg} -n {vnet}',
checks=self.check('length(subnets)', 3))
with self.assertRaisesRegex(CLIError, 'Not found in cache'):
self.cmd('cache show -g {rg} -n {vnet} -t VirtualNetwork')
# test that generic update works with caching
self.cmd('network vnet update -g {rg} -n {vnet} --set tags.a=1 --defer')
self.cmd('network vnet update -g {rg} -n {vnet} --set tags.b=2')
self.cmd('network vnet show -g {rg} -n {vnet}', checks=[
self.check('length(tags)', 2),
self.check('length(subnets)', 3) # should reflect the write-through behavior from the earlier PUT
])
@live_only()
@ResourceGroupPreparer(name_prefix='cli_test_vnet_ids_query')
def test_network_vnet_ids_query(self, resource_group):
import json
# This test ensures that --query works with --ids
self.kwargs.update({
'vnet1': 'vnet1',
'vnet2': 'vnet2'
})
self.kwargs['id1'] = self.cmd('network vnet create -g {rg} -n {vnet1}').get_output_in_json()['newVNet']['id']
self.kwargs['id2'] = self.cmd('network vnet create -g {rg} -n {vnet2}').get_output_in_json()['newVNet']['id']
self.cmd('network vnet show --ids {id1} {id2} --query "[].name"', checks=[
self.check('length(@)', 2),
self.check("contains(@, '{vnet1}')", True),
self.check("contains(@, '{vnet2}')", True),
])
# This test ensures you can pipe a list of IDs to --ids
self.kwargs['ids'] = self.cmd('network vnet list -g {rg} --query "[].id" -otsv').output
self.cmd('network vnet show --ids {ids}',
checks=self.check('length(@)', 2))
# This test ensures you can pipe JSON output to --ids Windows-style
# ensures a single JSON string has its ids parsed out
self.kwargs['json'] = json.dumps(self.cmd('network vnet list -g {rg}').get_output_in_json())
self.cmd('network vnet show --ids \'{json}\'',
checks=self.check('length(@)', 2))
# This test ensures you can pipe JSON output to --ids bash-style
# ensures that a JSON string where each line is interpreted individually
# is reassembled and treated as a single JSON string
json_obj = self.cmd('network vnet list -g {rg}').get_output_in_json()
for item in json_obj:
del item['etag']
split_json = json.dumps(json_obj, indent=4).split()
split_string = ' '.join(split_json).replace('{', '{{').replace('}', '}}').replace('"', '\\"')
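# braces are doubled and quotes escaped because self.cmd() interpolates the
# command string with self.kwargs via str.format() before execution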
self.cmd('network vnet show --ids {}'.format(split_string),
checks=self.check('length(@)', 2))
class NetworkVNetPeeringScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vnet_peering')
def test_network_vnet_peering(self, resource_group):
# create two vnets with non-overlapping prefixes
self.cmd('network vnet create -g {rg} -n vnet1')
self.cmd('network vnet create -g {rg} -n vnet2 --subnet-name GatewaySubnet --address-prefix 11.0.0.0/16 --subnet-prefix 11.0.0.0/24')
# create supporting resources for gateway
self.cmd('network public-ip create -g {rg} -n ip1')
ip_id = self.cmd('network public-ip show -g {rg} -n ip1 --query id').get_output_in_json()
vnet_id = self.cmd('network vnet show -g {rg} -n vnet2 --query id').get_output_in_json()
self.kwargs.update({
'ip_id': ip_id,
'vnet_id': vnet_id
})
# create the gateway on vnet2
self.cmd('network vnet-gateway create -g {rg} -n gateway1 --public-ip-address {ip_id} --vnet {vnet_id} --tags foo=doo')
vnet1_id = self.cmd('network vnet show -g {rg} -n vnet1 --query id').get_output_in_json()
vnet2_id = self.cmd('network vnet show -g {rg} -n vnet2 --query id').get_output_in_json()
self.kwargs.update({
'vnet1_id': vnet1_id,
'vnet2_id': vnet2_id
})
# set up gateway sharing from vnet1 to vnet2. test that remote-vnet indeed accepts name or id.
self.cmd('network vnet peering create -g {rg} -n peering2 --vnet-name vnet2 --remote-vnet {vnet1_id} --allow-gateway-transit', checks=[
self.check('allowGatewayTransit', True),
self.check('remoteVirtualNetwork.id', '{vnet1_id}'),
self.check('peeringState', 'Initiated')
])
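# the peering stays 'Initiated' until the reciprocal peering below exists, after
# which both sides report 'Connected'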
self.cmd('network vnet peering create -g {rg} -n peering1 --vnet-name vnet1 --remote-vnet vnet2 --use-remote-gateways --allow-forwarded-traffic', checks=[
self.check('useRemoteGateways', True),
self.check('remoteVirtualNetwork.id', '{vnet2_id}'),
self.check('peeringState', 'Connected'),
self.check('allowVirtualNetworkAccess', False)
])
self.cmd('network vnet peering show -g {rg} -n peering1 --vnet-name vnet1',
checks=self.check('name', 'peering1'))
self.cmd('network vnet peering list -g {rg} --vnet-name vnet2', checks=[
self.check('[0].name', 'peering2'),
self.check('length(@)', 1)
])
self.cmd('network vnet peering update -g {rg} -n peering1 --vnet-name vnet1 --set useRemoteGateways=false', checks=[
self.check('useRemoteGateways', False),
self.check('allowForwardedTraffic', True)
])
self.cmd('network vnet peering delete -g {rg} -n peering1 --vnet-name vnet1')
self.cmd('network vnet peering list -g {rg} --vnet-name vnet1',
checks=self.is_empty())
# must delete the second peering and the gateway or the resource group delete will fail
self.cmd('network vnet peering delete -g {rg} -n peering2 --vnet-name vnet2')
self.cmd('network vnet-gateway delete -g {rg} -n gateway1')
class NetworkVpnConnectionIpSecPolicy(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vpn_connection_ipsec')
def test_network_vpn_connection_ipsec(self, resource_group):
self.kwargs.update({
'vnet1': 'vnet1',
'vnet_prefix1': '10.11.0.0/16',
'vnet_prefix2': '10.12.0.0/16',
'fe_sub1': 'FrontEnd',
'fe_sub_prefix1': '10.11.0.0/24',
'be_sub1': 'BackEnd',
'be_sub_prefix1': '10.12.0.0/24',
'gw_sub1': 'GatewaySubnet',
'gw_sub_prefix1': '10.12.255.0/27',
'gw1ip': 'pip1',
'gw1': 'gw1',
'gw1_sku': 'Standard',
'lgw1': 'lgw1',
'lgw1ip': '131.107.72.22',
'lgw1_prefix1': '10.61.0.0/16',
'lgw1_prefix2': '10.62.0.0/16',
'conn1': 'conn1'
})
self.cmd('network vnet create -g {rg} -n {vnet1} --address-prefix {vnet_prefix1} {vnet_prefix2}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet1} -n {fe_sub1} --address-prefix {fe_sub_prefix1}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet1} -n {be_sub1} --address-prefix {be_sub_prefix1}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet1} -n {gw_sub1} --address-prefix {gw_sub_prefix1}')
self.cmd('network public-ip create -g {rg} -n {gw1ip}')
self.cmd('network vnet-gateway create -g {rg} -n {gw1} --public-ip-address {gw1ip} --vnet {vnet1} --sku {gw1_sku}')
self.cmd('network local-gateway create -g {rg} -n {lgw1} --gateway-ip-address {lgw1ip} --local-address-prefixes {lgw1_prefix1} {lgw1_prefix2}')
self.cmd('network vpn-connection create -g {rg} -n {conn1} --vnet-gateway1 {gw1} --local-gateway2 {lgw1} --shared-key AzureA1b2C3')
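# attach a custom IPsec/IKE policy; --sa-lifetime is expressed in seconds and
# --sa-max-size in kilobytes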
self.cmd('network vpn-connection ipsec-policy add -g {rg} --connection-name {conn1} --ike-encryption AES256 --ike-integrity SHA384 --dh-group DHGroup24 --ipsec-encryption GCMAES256 --ipsec-integrity GCMAES256 --pfs-group PFS24 --sa-lifetime 7200 --sa-max-size 2048')
self.cmd('network vpn-connection ipsec-policy list -g {rg} --connection-name {conn1}')
self.cmd('network vpn-connection ipsec-policy clear -g {rg} --connection-name {conn1}')
self.cmd('network vpn-connection ipsec-policy list -g {rg} --connection-name {conn1}')
class NetworkVnetGatewayIpSecPolicy(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vnet_gateway_ipsec')
def test_network_vnet_gateway_ipsec(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'ip': 'pip1',
'gw': 'gw1',
'gw_sku': 'VpnGw2',
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name GatewaySubnet')
self.cmd('network public-ip create -g {rg} -n {ip}')
self.cmd('network vnet-gateway create -g {rg} -n {gw} --public-ip-address {ip} --vnet {vnet} --sku {gw_sku} --gateway-type Vpn --vpn-type RouteBased --address-prefix 40.1.0.0/24 --client-protocol IkeV2 SSTP --radius-secret 111_aaa --radius-server 30.1.1.15')
self.cmd('network vnet-gateway ipsec-policy add -g {rg} --gateway-name {gw} --ike-encryption AES256 --ike-integrity SHA384 --dh-group DHGroup24 --ipsec-encryption GCMAES256 --ipsec-integrity GCMAES256 --pfs-group PFS24 --sa-lifetime 7200 --sa-max-size 2048')
self.cmd('network vnet-gateway ipsec-policy list -g {rg} --gateway-name {gw}')
self.cmd('network vnet-gateway ipsec-policy clear -g {rg} --gateway-name {gw}')
self.cmd('network vnet-gateway ipsec-policy list -g {rg} --gateway-name {gw}')
class NetworkVirtualRouter(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_virtual_router', location='WestCentralUS')
@unittest.skip('Skip as service has bug for deleting peering')
def test_network_virtual_router_scenario(self, resource_group, resource_group_location):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'vnet': 'vnet1',
'ip': 'pip1',
'gw': 'gw1',
'gw_sku': 'HighPerformance',
'vrouter': 'vrouter1',
'vrouter_peering': 'peering1'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name GatewaySubnet -l {location}')
self.cmd('network public-ip create -g {rg} -n {ip} -l {location}')
self.cmd('network vnet-gateway create -g {rg} -n {gw} --public-ip-address {ip} --vnet {vnet} --sku {gw_sku} --gateway-type ExpressRoute -l {location}')
self.cmd('network vrouter create -n {vrouter} -l {location} -g {rg} --hosted-gateway {gw}', checks=[
self.check('type', 'Microsoft.Network/VirtualRouters'),
self.check('name', '{vrouter}')
])
self.cmd('network vrouter show -n {vrouter} -g {rg}', checks=[
self.check('name', '{vrouter}')
])
self.cmd('network vrouter list -g {rg}', checks=[
self.check('@[0].name', '{vrouter}')
])
self.cmd('network vrouter peering create -n {vrouter_peering} --peer-asn 10000 --peer-ip 10.0.0.0 -g {rg} --vrouter-name {vrouter}', checks=[
self.check('name', '{vrouter_peering}')
])
self.cmd('network vrouter peering update -n {vrouter_peering} --peer-asn 11000 --peer-ip 11.0.0.0 -g {rg} --vrouter-name {vrouter}', checks=[
self.check('peerAsn', '11000'),
self.check('peerIp', '11.0.0.0')
])
self.cmd('network vrouter peering show -n {vrouter_peering} -g {rg} --vrouter-name {vrouter}', checks=[
self.check('name', '{vrouter_peering}')
])
self.cmd('network vrouter peering list -g {rg} --vrouter-name {vrouter}', checks=[
self.check('@[0].name', '{vrouter_peering}'),
self.check('length(@)', 1)
])
self.cmd('network vrouter peering delete -n {vrouter_peering} -g {rg} --vrouter-name {vrouter}')
self.cmd('network vrouter delete -g {rg} -n {vrouter}')
@record_only() # this feature needs resources from the service team for now
@ResourceGroupPreparer(name_prefix='cli_test_virtual_router', location='eastus2euap')
def test_vrouter_with_virtual_hub_support(self, resource_group, resource_group_location):
self.kwargs.update({
'rg': 'test_vrouter_with_virtual_hub_support', # the subscription must be a specific one provided by the service team
'location': resource_group_location,
'vnet': 'vnet2',
'subnet1': 'subnet1',
'subnet2': 'subnet2',
'vrouter': 'vrouter2',
'peer': 'peer1'
})
self.cmd('network vnet create -g {rg} -n {vnet} '
'--location {location} '
'--subnet-name {subnet1} '
'--address-prefix 10.0.0.0/24')
# a cleanup program in this subscription periodically assigns an NSG to each subnet,
# which would block the subnet from being assigned to the virtual router
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet1} --remove networkSecurityGroup')
vnet = self.cmd('network vnet show -g {rg} -n {vnet}').get_output_in_json()
self.kwargs.update({
'subnet1_id': vnet['subnets'][0]['id']
})
self.cmd('network vrouter create -g {rg} -l {location} -n {vrouter} --hosted-subnet {subnet1_id}', checks=[
self.check('type', 'Microsoft.Network/virtualHubs'),
self.check('ipConfigurations', None),
self.check('provisioningState', 'Succeeded')
])
self.cmd('network vrouter list -g {rg}')
self.cmd('network vrouter show -g {rg} -n {vrouter}', checks=[
self.check('virtualRouterAsn', 65515),
self.check('length(virtualRouterIps)', 2),
])
self.cmd('network vrouter peering create -g {rg} --vrouter-name {vrouter} -n {peer} '
'--peer-asn 11000 --peer-ip 10.0.0.120')
self.cmd('network vrouter peering list -g {rg} --vrouter-name {vrouter}')
self.cmd('network vrouter peering show -g {rg} --vrouter-name {vrouter} -n {peer}')
# peering update is commented out: the service rejects updates unless the peer ASN falls within the required range
# self.cmd('network vrouter peering update -g {rg} --vrouter-name {vrouter} -n {peer} --peer-ip 10.0.0.0')
self.cmd('network vrouter peering delete -g {rg} --vrouter-name {vrouter} -n {peer}')
self.cmd('network vrouter delete -g {rg} -n {vrouter}')
class NetworkVirtualHubRouter(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_virtual_hub_router', location='centraluseuap')
def test_network_virtual_hub_router_scenario(self, resource_group, resource_group_location):
self.kwargs.update({
'rg': resource_group,
'location': resource_group_location,
'vnet': 'vnet2',
'subnet1': 'RouteServerSubnet',
'vrouter': 'vrouter2',
'peer': 'peer1'
})
self.cmd('network vnet create -g {rg} -n {vnet} '
'--location {location} '
'--subnet-name {subnet1} '
'--address-prefix 10.0.0.0/24')
# a cleanup program in this subscription periodically assigns an NSG to each subnet,
# which would block the subnet from being assigned to the virtual router
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet1} --remove networkSecurityGroup')
vnet = self.cmd('network vnet show -g {rg} -n {vnet}').get_output_in_json()
self.kwargs.update({
'subnet1_id': vnet['subnets'][0]['id']
})
self.cmd('network routeserver create -g {rg} -l {location} -n {vrouter} '
'--hosted-subnet {subnet1_id}',
checks=[
self.check('type', 'Microsoft.Network/virtualHubs'),
self.check('ipConfigurations', None),
self.check('provisioningState', 'Succeeded')
])
self.cmd('network routeserver update -g {rg} --name {vrouter} --allow-b2b-traffic', checks=[
self.check('allowBranchToBranchTraffic', True)
])
self.cmd('network routeserver list -g {rg}')
self.cmd('network routeserver show -g {rg} -n {vrouter}', checks=[
self.check('virtualRouterAsn', 65515),
self.check('length(virtualRouterIps)', 2),
])
self.cmd('network routeserver peering create -g {rg} --routeserver {vrouter} -n {peer} '
'--peer-asn 11000 --peer-ip 10.0.0.120')
self.cmd('network routeserver peering list -g {rg} --routeserver {vrouter}')
self.cmd('network routeserver peering show -g {rg} --routeserver {vrouter} -n {peer}')
self.cmd('network routeserver peering list-advertised-routes -g {rg} --routeserver {vrouter} -n {peer}')
self.cmd('network routeserver peering list-learned-routes -g {rg} --routeserver {vrouter} -n {peer}')
# peering update is commented out: the service rejects updates unless the peer ASN falls within the required range
# self.cmd('network routeserver peering update -g {rg} --routeserver {vrouter} -n {peer} --peer-ip 10.0.0.0')
self.cmd('network routeserver peering delete -g {rg} --routeserver {vrouter} -n {peer} -y')
self.cmd('network routeserver delete -g {rg} -n {vrouter} -y')
class NetworkSubnetScenarioTests(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_subnet_set_test')
def test_network_subnet_set(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'vnet_prefix': '123.0.0.0/16',
'subnet': 'default',
'subnet_prefix': '123.0.0.0/24',
'subnet_prefix2': '123.0.5.0/24',
'nsg': 'test-vnet-nsg'
})
self.cmd('network vnet create --resource-group {rg} --name {vnet} --address-prefix {vnet_prefix} --subnet-name {subnet} --subnet-prefix {subnet_prefix}')
self.cmd('network nsg create --resource-group {rg} --name {nsg}')
# Test we can update the address space and nsg
self.cmd('network vnet subnet update --resource-group {rg} --vnet-name {vnet} --name {subnet} --address-prefix {subnet_prefix2} --network-security-group {nsg}', checks=[
self.check('addressPrefix', '{subnet_prefix2}'),
self.check('ends_with(@.networkSecurityGroup.id, `/{nsg}`)', True)
])
# test generic update
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --set addressPrefix=123.0.0.0/24',
checks=self.check('addressPrefix', '123.0.0.0/24'))
# Test we can get rid of the nsg.
self.cmd('network vnet subnet update --resource-group {rg} --vnet-name {vnet} --name {subnet} --address-prefix {subnet_prefix2} --network-security-group \"\"',
checks=self.check('networkSecurityGroup', None))
self.cmd('network vnet delete --resource-group {rg} --name {vnet}')
self.cmd('network nsg delete --resource-group {rg} --name {nsg}')
@ResourceGroupPreparer(name_prefix='cli_subnet_endpoint_service_test')
def test_network_subnet_endpoint_service(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'subnet': 'subnet1'
})
result = self.cmd('network vnet list-endpoint-services -l westus').get_output_in_json()
self.assertGreaterEqual(len(result), 2)
self.cmd('network vnet create -g {rg} -n {vnet}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet} --address-prefix 10.0.1.0/24 --service-endpoints Microsoft.Storage',
checks=self.check('serviceEndpoints[0].service', 'Microsoft.Storage'))
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --service-endpoints ""',
checks=self.check('serviceEndpoints', None))
@ResourceGroupPreparer(name_prefix='cli_subnet_delegation')
def test_network_subnet_delegation(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'subnet': 'subnet1',
})
result = self.cmd('network vnet subnet list-available-delegations -l westcentralus').get_output_in_json()
self.assertGreater(len(result), 1)
result = self.cmd('network vnet subnet list-available-delegations -g {rg}').get_output_in_json()
self.assertGreater(len(result), 1)
self.cmd('network vnet create -g {rg} -n {vnet} -l westcentralus')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet} --address-prefix 10.0.0.0/24 --delegations Microsoft.Web.serverFarms', checks=[
self.check('delegations[0].serviceName', 'Microsoft.Web/serverFarms')
])
# verify the update command, and that CLI validation will accept either serviceName or Name
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --delegations Microsoft.Sql/managedInstances',
checks=self.check('delegations[0].serviceName', 'Microsoft.Sql/managedInstances'))
@ResourceGroupPreparer(name_prefix='test_subnet_with_private_endpoint_option')
def test_subnet_with_private_endpoint_and_private_link_options(self, resource_group):
self.kwargs.update({
'vnet': 'MyVnet',
'subnet1': 'MySubnet1',
'subnet2': 'MySubnet2',
'subnet3': 'MySubnet3',
})
self.cmd('network vnet create -g {rg} -n {vnet}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} '
'--address-prefixes 10.0.1.0/24 '
'--name {subnet1} '
'--disable-private-endpoint-network-policies true', checks=[
self.check('addressPrefix', '10.0.1.0/24'),
self.check('privateEndpointNetworkPolicies', 'Disabled'),
self.check('privateLinkServiceNetworkPolicies', 'Enabled')
])
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} '
'--address-prefixes 10.0.2.0/24 '
'--name {subnet2} '
'--disable-private-link-service-network-policies true', checks=[
self.check('addressPrefix', '10.0.2.0/24'),
self.check('privateEndpointNetworkPolicies', 'Enabled'),
self.check('privateLinkServiceNetworkPolicies', 'Disabled')
])
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} '
'--address-prefixes 10.0.3.0/24 '
'--name {subnet3} '
'--disable-private-endpoint-network-policies true '
'--disable-private-link-service-network-policies true', checks=[
self.check('addressPrefix', '10.0.3.0/24'),
self.check('privateEndpointNetworkPolicies', 'Disabled'),
self.check('privateLinkServiceNetworkPolicies', 'Disabled')
])
class NetworkActiveActiveCrossPremiseScenarioTest(ScenarioTest): # pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer(name_prefix='cli_test_active_active_cross_premise_connection')
def test_network_active_active_cross_premise_connection(self, resource_group):
self.kwargs.update({
'vnet1': 'vnet1',
'vnet_prefix1': '10.11.0.0/16',
'vnet_prefix2': '10.12.0.0/16',
'vnet1_asn': 65010,
'gw_subnet': 'GatewaySubnet',
'gw_subnet_prefix': '10.12.255.0/27',
'gw_ip1': 'gwip1',
'gw_ip2': 'gwip2',
'gw1': 'gw1',
'lgw2': 'lgw2',
'lgw_ip': '131.107.72.22',
'lgw_prefix': '10.52.255.253/32',
'bgp_peer1': '10.52.255.253',
'lgw_asn': 65050,
'lgw_loc': 'eastus',
'conn_151': 'Vnet1toSite5_1',
'conn_152': 'Vnet1toSite5_2',
'shared_key': 'abc123',
'shared_key2': 'a1b2c3',
'lgw3': 'lgw3',
'lgw3_ip': '131.107.72.23',
'lgw3_prefix': '10.52.255.254/32',
'bgp_peer2': '10.52.255.254'
})
self.cmd('network vnet create -g {rg} -n {vnet1} --address-prefix {vnet_prefix1} {vnet_prefix2} --subnet-name {gw_subnet} --subnet-prefix {gw_subnet_prefix}')
self.cmd('network public-ip create -g {rg} -n {gw_ip1}')
self.cmd('network public-ip create -g {rg} -n {gw_ip2}')
# create the vnet gateway with active-active feature
self.cmd('network vnet-gateway create -g {rg} -n {gw1} --vnet {vnet1} --sku HighPerformance --asn {vnet1_asn} --public-ip-addresses {gw_ip1} {gw_ip2} --tags foo=doo')
# switch to active-standby
self.cmd('network vnet-gateway update -g {rg} -n {gw1} --vnet {vnet1} --sku HighPerformance --asn {vnet1_asn} --public-ip-addresses {gw_ip1} --no-wait --tags foo=boo')
# create and connect first local-gateway
self.cmd('network local-gateway create -g {rg} -n {lgw2} -l {lgw_loc} --gateway-ip-address {lgw_ip} --local-address-prefixes {lgw_prefix} --asn {lgw_asn} --bgp-peering-address {bgp_peer1}')
self.cmd('network vpn-connection create -g {rg} -n {conn_151} --vnet-gateway1 {gw1} --local-gateway2 {lgw2} --shared-key {shared_key} --enable-bgp')
self.cmd('network vpn-connection shared-key reset -g {rg} --connection-name {conn_151} --key-length 128')
sk1 = self.cmd('network vpn-connection shared-key show -g {rg} --connection-name {conn_151}').get_output_in_json()
self.cmd('network vpn-connection shared-key update -g {rg} --connection-name {conn_151} --value {shared_key2}')
sk2 = self.cmd('network vpn-connection shared-key show -g {rg} --connection-name {conn_151}',
checks=self.check('value', '{shared_key2}')).get_output_in_json()
self.assertNotEqual(sk1, sk2)
# create and connect second local-gateway
self.cmd('network local-gateway create -g {rg} -n {lgw3} -l {lgw_loc} --gateway-ip-address {lgw3_ip} --local-address-prefixes {lgw3_prefix} --asn {lgw_asn} --bgp-peering-address {bgp_peer2}')
self.cmd('network vpn-connection create -g {rg} -n {conn_152} --vnet-gateway1 {gw1} --local-gateway2 {lgw3} --shared-key {shared_key} --enable-bgp')
class NetworkActiveActiveVnetScenarioTest(ScenarioTest): # pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer(name_prefix='cli_test_active_active_vnet_vnet_connection')
def test_network_active_active_vnet_connection(self, resource_group):
self.kwargs.update({
'subnet': 'GatewaySubnet',
'vnet1': 'vnet1',
'vnet1_prefix': '10.21.0.0/16',
'vnet1_asn': 65010,
'gw1': 'vgw1',
'gw1_prefix': '10.21.255.0/27',
'gw1_ip1': 'gw1ip1',
'gw1_ip2': 'gw1ip2',
'vnet2': 'vnet2',
'vnet2_prefix': '10.22.0.0/16',
'vnet2_asn': 65020,
'gw2': 'vgw2',
'gw2_prefix': '10.22.255.0/27',
'gw2_ip1': 'gw2ip1',
'gw2_ip2': 'gw2ip2',
'key': 'abc123',
'conn12': 'vnet1to2',
'conn21': 'vnet2to1',
'bgp_peer1': '10.52.255.253',
'bgp_peer2': '10.53.255.253'
})
# Create one VNet with two public IPs
self.cmd('network vnet create -g {rg} -n {vnet1} --address-prefix {vnet1_prefix} --subnet-name {subnet} --subnet-prefix {gw1_prefix}')
self.cmd('network public-ip create -g {rg} -n {gw1_ip1}')
self.cmd('network public-ip create -g {rg} -n {gw1_ip2}')
# Create second VNet with two public IPs
self.cmd('network vnet create -g {rg} -n {vnet2} --address-prefix {vnet2_prefix} --subnet-name {subnet} --subnet-prefix {gw2_prefix}')
self.cmd('network public-ip create -g {rg} -n {gw2_ip1}')
self.cmd('network public-ip create -g {rg} -n {gw2_ip2}')
self.cmd('network vnet-gateway create -g {rg} -n {gw1} --vnet {vnet1} --sku HighPerformance --asn {vnet1_asn} --public-ip-addresses {gw1_ip1} {gw1_ip2} --bgp-peering-address {bgp_peer1} --no-wait')
self.cmd('network vnet-gateway create -g {rg} -n {gw2} --vnet {vnet2} --sku HighPerformance --asn {vnet2_asn} --public-ip-addresses {gw2_ip1} {gw2_ip2} --bgp-peering-address {bgp_peer2} --no-wait')
# wait for gateway completion to finish
self.cmd('network vnet-gateway wait -g {rg} -n {gw1} --created')
self.cmd('network vnet-gateway wait -g {rg} -n {gw2} --created')
# create and connect the VNet gateways
self.cmd('network vpn-connection create -g {rg} -n {conn12} --vnet-gateway1 {gw1} --vnet-gateway2 {gw2} --shared-key {key} --enable-bgp')
self.cmd('network vpn-connection create -g {rg} -n {conn21} --vnet-gateway1 {gw2} --vnet-gateway2 {gw1} --shared-key {key} --enable-bgp')
class NetworkVpnGatewayScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vpn_gateway')
def test_network_vpn_gateway(self, resource_group):
self.kwargs.update({
'vnet1': 'myvnet1',
'vnet2': 'myvnet2',
'vnet3': 'myvnet3',
'gw1': 'gateway1',
'gw2': 'gateway2',
'gw3': 'gateway3',
'ip1': 'pubip1',
'ip2': 'pubip2',
'ip3': 'pubip3',
'custom_routes1': "101.168.0.6/32",
'custom_routes2': "102.168.0.6/32"
})
self.cmd('network public-ip create -n {ip1} -g {rg}')
self.cmd('network public-ip create -n {ip2} -g {rg}')
self.cmd('network public-ip create -n {ip3} -g {rg}')
self.cmd('network vnet create -g {rg} -n {vnet1} --subnet-name GatewaySubnet --address-prefix 10.0.0.0/16 --subnet-prefix 10.0.0.0/24')
self.cmd('network vnet create -g {rg} -n {vnet2} --subnet-name GatewaySubnet --address-prefix 10.1.0.0/16')
self.cmd('network vnet create -g {rg} -n {vnet3} --subnet-name GatewaySubnet --address-prefix 10.2.0.0/16')
self.kwargs.update({'sub': self.get_subscription_id()})
self.kwargs.update({
'vnet1_id': '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet1}'.format(**self.kwargs),
'vnet2_id': '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet2}'.format(**self.kwargs)
})
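# resource IDs are assembled by hand so the commands below can verify that
# gateway/vnet parameters accept either a bare name or a full resource ID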
with self.assertRaisesRegex(CLIError, 'vpn_gateway_generation should not be provided if gateway_type is not Vpn.'):
self.cmd(
'network vnet-gateway create -g {rg} -n {gw1} --vnet {vnet1_id} --public-ip-address {ip1} --gateway-type ExpressRoute --vpn-gateway-generation Generation1')
self.cmd('network vnet-gateway create -g {rg} -n {gw1} --vnet {vnet1_id} --public-ip-address {ip1} --vpn-gateway-generation Generation1 --custom-routes {custom_routes1} --no-wait')
self.cmd('network vnet-gateway create -g {rg} -n {gw2} --vnet {vnet2_id} --public-ip-address {ip2} --no-wait')
self.cmd('network vnet-gateway create -g {rg} -n {gw3} --vnet {vnet3} --public-ip-address {ip3} --no-wait --sku standard --asn 12345 --bgp-peering-address 10.2.250.250 --peer-weight 50')
self.cmd('network vnet-gateway wait -g {rg} -n {gw1} --created')
self.cmd('network vnet-gateway wait -g {rg} -n {gw2} --created')
self.cmd('network vnet-gateway wait -g {rg} -n {gw3} --created')
self.cmd('network vnet-gateway show -g {rg} -n {gw1}', checks=[
self.check('gatewayType', 'Vpn'),
self.check('sku.capacity', 2),
self.check('sku.name', 'Basic'),
self.check('vpnType', 'RouteBased'),
self.check('vpnGatewayGeneration', 'Generation1'),
self.check('enableBgp', False),
self.check('customRoutes.addressPrefixes[0]', self.kwargs['custom_routes1'])
])
self.cmd('network vnet-gateway update -g {rg} -n {gw1} --sku Standard --custom-routes {custom_routes1} {custom_routes2}', checks=[
self.check('length(customRoutes.addressPrefixes)', 2)
])
self.cmd('network vnet-gateway show -g {rg} -n {gw2}', checks=[
self.check('gatewayType', 'Vpn'),
self.check('sku.capacity', 2),
self.check('sku.name', 'Basic'),
self.check('vpnType', 'RouteBased'),
self.check('enableBgp', False)
])
self.cmd('network vnet-gateway show -g {rg} -n {gw3}', checks=[
self.check('sku.name', 'Standard'),
self.check('enableBgp', True),
self.check('bgpSettings.asn', 12345),
self.check('bgpSettings.bgpPeeringAddress', '10.2.250.250'),
self.check('bgpSettings.peerWeight', 50)
])
self.kwargs.update({
'conn12': 'conn1to2',
'conn21': 'conn2to1',
'gw1_id': '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworkGateways/{gw1}'.format(**self.kwargs)
})
self.cmd('network vpn-connection create -n {conn12} -g {rg} --shared-key 123 --vnet-gateway1 {gw1_id} --vnet-gateway2 {gw2} --tags foo=doo')
self.cmd('network vpn-connection update -n {conn12} -g {rg} --routing-weight 25 --tags foo=boo',
checks=self.check('routingWeight', 25))
self.cmd('network vpn-connection create -n {conn21} -g {rg} --shared-key 123 --vnet-gateway2 {gw1_id} --vnet-gateway1 {gw2}')
self.cmd('network vnet-gateway list-learned-routes -g {rg} -n {gw1}')
self.cmd('network vnet-gateway list-advertised-routes -g {rg} -n {gw1} --peer 10.1.1.1')
self.cmd('network vnet-gateway list-bgp-peer-status -g {rg} -n {gw1} --peer 10.1.1.1')
@ResourceGroupPreparer(name_prefix='cli_test_vpn_gateway_aad_')
def test_network_vpn_gateway_aad(self, resource_group):
self.kwargs.update({
'vnet': 'vnet',
'gw': 'gw',
'ip': 'ip',
'aad_tenant': 'https://login.microsoftonline.com/0ab2c4f4-81e6-44cc-a0b2-b3a47a1443f4',
'aad_issuer': 'https://sts.windows.net/0ab2c4f4-81e6-44cc-a0b2-b3a47a1443f4/',
'aad_audience': 'a21fce82-76af-45e6-8583-a08cb3b956f9'
})
self.cmd('network public-ip create -g {rg} -n {ip} ')
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name GatewaySubnet')
self.cmd('network vnet-gateway create -g {rg} -n {gw} '
'--vnet {vnet} --public-ip-address {ip} '
'--gateway-type Vpn --vpn-type RouteBased '
'--sku VpnGw1 '
'--client-protocol OpenVPN '
'--address-prefixes 201.169.0.0/16 ')
self.cmd('network vnet-gateway show -g {rg} -n {gw}', checks=[
self.check('sku.name', 'VpnGw1'),
self.check('enableBgp', False),
self.check('vpnType', 'RouteBased'),
self.check('vpnClientConfiguration.vpnClientProtocols[0]', "OpenVPN")
])
self.cmd('network vnet-gateway aad assign -g {rg} --gateway-name {gw} '
'--tenant {aad_tenant} '
'--audience {aad_audience} '
'--issuer {aad_issuer} ')
self.cmd('network vnet-gateway show -g {rg} -n {gw}', checks=[
self.check('vpnClientConfiguration.aadTenant', self.kwargs['aad_tenant']),
self.check('vpnClientConfiguration.aadIssuer', self.kwargs['aad_issuer']),
self.check('vpnClientConfiguration.aadAudience', self.kwargs['aad_audience'])
])
self.cmd('network vnet-gateway aad show -g {rg} --gateway-name {gw}', checks=[
self.check('aadTenant', self.kwargs['aad_tenant'])
])
self.cmd('network vnet-gateway aad remove -g {rg} --gateway-name {gw}')
self.cmd('network vnet-gateway aad show -g {rg} --gateway-name {gw}', checks=[
self.check('aadTenant', None),
self.check('aadIssuer', None),
self.check('aadAudience', None)
])
class NetworkVpnClientPackageScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer('cli_test_vpn_client_package')
def test_vpn_client_package(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1',
'public_ip': 'pip1',
'gateway_prefix': '100.1.1.0/24',
'gateway': 'vgw1',
'cert': 'cert1',
'cert_path': os.path.join(TEST_DIR, 'test-root-cert.cer')
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name GatewaySubnet')
self.cmd('network public-ip create -g {rg} -n {public_ip}')
self.cmd('network vnet-gateway create -g {rg} -n {gateway} --address-prefix {gateway_prefix} --vnet {vnet} --public-ip-address {public_ip}')
self.cmd('network vnet-gateway root-cert create -g {rg} --gateway-name {gateway} -n {cert} --public-cert-data "{cert_path}"')
output = self.cmd('network vnet-gateway vpn-client generate -g {rg} -n {gateway}').get_output_in_json()
self.assertIn('.zip', output, 'Expected ZIP file in output.\nActual: {}'.format(str(output)))
output = self.cmd('network vnet-gateway vpn-client show-url -g {rg} -n {gateway}').get_output_in_json()
self.assertIn('.zip', output, 'Expected ZIP file in output.\nActual: {}'.format(str(output)))
class NetworkTrafficManagerScenarioTest(ScenarioTest):
@ResourceGroupPreparer('cli_test_traffic_manager')
def test_network_traffic_manager(self, resource_group):
self.kwargs.update({
'tm': 'mytmprofile',
'endpoint': 'myendpoint',
'dns': 'mytrafficmanager001100a'
})
self.cmd('network traffic-manager profile check-dns -n myfoobar1')
self.cmd('network traffic-manager profile create -n {tm} -g {rg} --routing-method priority --unique-dns-name {dns} --tags foo=doo',
checks=self.check('TrafficManagerProfile.trafficRoutingMethod', 'Priority'))
self.cmd('network traffic-manager profile show -g {rg} -n {tm}',
checks=self.check('dnsConfig.relativeName', '{dns}'))
self.cmd('network traffic-manager profile update -n {tm} -g {rg} --routing-method weighted --tags foo=boo',
checks=self.check('trafficRoutingMethod', 'Weighted'))
self.cmd('network traffic-manager profile list -g {rg}')
# Endpoint tests
self.cmd('network traffic-manager endpoint create -n {endpoint} --profile-name {tm} -g {rg} --type externalEndpoints --weight 50 --target www.microsoft.com',
checks=self.check('type', 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'))
self.cmd('network traffic-manager endpoint update -n {endpoint} --profile-name {tm} -g {rg} --type externalEndpoints --weight 25 --target www.contoso.com', checks=[
self.check('weight', 25),
self.check('target', 'www.contoso.com')
])
self.cmd('network traffic-manager endpoint show -g {rg} --profile-name {tm} -t externalEndpoints -n {endpoint}')
self.cmd('network traffic-manager endpoint list -g {rg} --profile-name {tm} -t externalEndpoints',
checks=self.check('length(@)', 1))
# ensure a profile with endpoints can be updated
self.cmd('network traffic-manager profile update -n {tm} -g {rg}')
self.cmd('network traffic-manager endpoint delete -g {rg} --profile-name {tm} -t externalEndpoints -n {endpoint}')
self.cmd('network traffic-manager endpoint list -g {rg} --profile-name {tm} -t externalEndpoints',
checks=self.check('length(@)', 0))
self.cmd('network traffic-manager profile delete -g {rg} -n {tm}')
@ResourceGroupPreparer('cli_test_traffic_manager2')
def test_network_traffic_manager2(self, resource_group):
self.kwargs.update({
'tm': 'mytmprofile2',
'dns': 'mytrafficmanager001100a2'
})
self.cmd('network traffic-manager profile create -n {tm} -g {rg} --routing-method Multivalue --unique-dns-name {dns} --max-return 3 --tags foo=doo',
checks=self.check('TrafficManagerProfile.trafficRoutingMethod', 'MultiValue'))
self.cmd('network traffic-manager profile update -n {tm} -g {rg} --routing-method MultiValue --max-return 4 --tags foo=boo',
checks=self.check('maxReturn', 4))
@ResourceGroupPreparer('cli_test_traffic_manager_subnet')
def test_network_traffic_manager_subnet_routing(self, resource_group):
self.kwargs.update({
'tm': 'tm1',
'endpoint': 'ep1',
'dns': self.create_random_name('testtm', 20),
'pip': 'ip1',
'ip_dns': self.create_random_name('testpip', 20)
})
self.cmd('network traffic-manager profile create -n {tm} -g {rg} --routing-method subnet --unique-dns-name {dns} --custom-headers foo=bar --status-code-ranges 200-202', checks=[
self.check('TrafficManagerProfile.monitorConfig.expectedStatusCodeRanges[0].min', 200),
self.check('TrafficManagerProfile.monitorConfig.expectedStatusCodeRanges[0].max', 202),
self.check('TrafficManagerProfile.monitorConfig.customHeaders[0].name', 'foo'),
self.check('TrafficManagerProfile.monitorConfig.customHeaders[0].value', 'bar')
])
self.kwargs['ip_id'] = self.cmd('network public-ip create -g {rg} -n {pip} --dns-name {ip_dns} --query publicIp.id').get_output_in_json()
self.cmd('network traffic-manager profile update -n {tm} -g {rg} --status-code-ranges 200-204 --custom-headers foo=doo test=best', checks=[
self.check('monitorConfig.expectedStatusCodeRanges[0].min', 200),
self.check('monitorConfig.expectedStatusCodeRanges[0].max', 204),
self.check('monitorConfig.customHeaders[0].name', 'foo'),
self.check('monitorConfig.customHeaders[0].value', 'doo'),
self.check('monitorConfig.customHeaders[1].name', 'test'),
self.check('monitorConfig.customHeaders[1].value', 'best')
])
# Endpoint tests
self.cmd('network traffic-manager endpoint create -n {endpoint} --profile-name {tm} -g {rg} --type azureEndpoints --target-resource-id {ip_id} --subnets 10.0.0.0 --custom-headers test=best', checks=[
self.check('customHeaders[0].name', 'test'),
self.check('customHeaders[0].value', 'best'),
self.check('subnets[0].first', '10.0.0.0')
])
self.cmd('network traffic-manager endpoint update -n {endpoint} --type azureEndpoints --profile-name {tm} -g {rg} --subnets 10.0.0.0:24', checks=[
self.check('subnets[0].first', '10.0.0.0'),
self.check('subnets[0].scope', '24')
])
self.cmd('network traffic-manager endpoint update -n {endpoint} --type azureEndpoints --profile-name {tm} -g {rg} --subnets 10.0.0.0-11.0.0.0', checks=[
self.check('subnets[0].first', '10.0.0.0'),
self.check('subnets[0].last', '11.0.0.0')
])
class NetworkWatcherConfigureScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_nw', location='westcentralus')
def test_network_watcher_configure(self, resource_group):
self.cmd('network watcher configure -g {rg} --locations westus westus2 westcentralus eastus canadaeast --enabled')
self.cmd('network watcher configure --locations westus westus2 eastus canadaeast --tags foo=doo')
self.cmd('network watcher configure -l westus2 --enabled false')
self.cmd('network watcher list')
class NetworkWatcherScenarioTest(ScenarioTest):
import mock
def _mock_thread_count():
return 1
@mock.patch('azure.cli.command_modules.vm._actions._get_thread_count', _mock_thread_count)
@ResourceGroupPreparer(name_prefix='cli_test_nw_vm', location='westus')
@AllowLargeResponse()
def test_network_watcher_vm(self, resource_group, resource_group_location):
self.kwargs.update({
'loc': 'westcentralus',
'vm': 'vm1',
'nsg': 'nsg1',
'capture': 'capture1',
'private-ip': '10.0.0.9'
})
vm = self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --authentication-type password --admin-username deploy --admin-password PassPass10!) --nsg {nsg} --nsg-rule None --private-ip-address {private-ip}').get_output_in_json()
self.kwargs['vm_id'] = vm['id']
self.cmd('vm extension set -g {rg} --vm-name {vm} -n NetworkWatcherAgentLinux --publisher Microsoft.Azure.NetworkWatcher')
self.cmd('network watcher test-connectivity -g {rg} --source-resource {vm} --dest-address www.microsoft.com --dest-port 80 --valid-status-codes 200 202')
self.cmd('network watcher run-configuration-diagnostic --resource {vm_id} --direction Inbound --protocol TCP --source 12.11.12.14 --destination 10.1.1.4 --port 12100')
self.cmd('network watcher show-topology -g {rg}')
self.cmd('network watcher test-ip-flow -g {rg} --vm {vm} --direction inbound --local {private-ip}:22 --protocol tcp --remote 100.1.2.3:*')
self.cmd('network watcher test-ip-flow -g {rg} --vm {vm} --direction outbound --local {private-ip}:* --protocol tcp --remote 100.1.2.3:80')
self.cmd('network watcher show-security-group-view -g {rg} --vm {vm}')
self.cmd('network watcher show-next-hop -g {rg} --vm {vm} --source-ip 10.0.0.9 --dest-ip 10.0.0.6')
@ResourceGroupPreparer(name_prefix='cli_test_nw_flow_log', location='westus')
@StorageAccountPreparer(name_prefix='clitestnw', location='westus', kind='StorageV2')
def test_network_watcher_flow_log(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'loc': resource_group_location,
'nsg': 'nsg1',
'sa': storage_account,
'ws': self.create_random_name('testws', 20),
'la_prop_path': os.path.join(TEST_DIR, 'loganalytics.json')
})
self.cmd('network nsg create -g {rg} -n {nsg}')
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --enabled --retention 5 --storage-account {sa}')
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --retention 0')
self.cmd('network watcher flow-log show -g {rg} --nsg {nsg}')
# test traffic-analytics features
self.cmd('resource create -g {rg} -n {ws} --resource-type Microsoft.OperationalInsights/workspaces -p @"{la_prop_path}"')
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --workspace {ws} --interval 10', checks=[
# self.check("contains(flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId, '{ws}')", True),
# self.check("flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.trafficAnalyticsInterval", 10),
# self.check("flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled", True)
])
self.cmd('network watcher flow-log show -g {rg} --nsg {nsg}', checks=[
# self.check("contains(flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.workspaceResourceId, '{ws}')", True),
# self.check("flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.trafficAnalyticsInterval", 10),
# self.check("flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled", True)
])
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --workspace {ws} --interval 60', checks=[
# self.check("flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.trafficAnalyticsInterval", 60)
])
self.cmd('network watcher flow-log show -g {rg} --nsg {nsg}', checks=[
# self.check("flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.trafficAnalyticsInterval", 60)
])
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --traffic-analytics false', checks=[
# self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False)
])
self.cmd('network watcher flow-log show -g {rg} --nsg {nsg}', checks=[
# self.check('flowAnalyticsConfiguration.networkWatcherFlowAnalyticsConfiguration.enabled', False)
])
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --workspace ""', checks=[
# self.check('flowAnalyticsConfiguration', None)
])
self.cmd('network watcher flow-log show -g {rg} --nsg {nsg}', checks=[
# self.check('flowAnalyticsConfiguration', None)
])
@ResourceGroupPreparer(name_prefix='cli_test_nw_flow_log2', location='canadaeast')
@StorageAccountPreparer(name_prefix='clitestnw', location='canadaeast', kind='StorageV2')
def test_network_watcher_flow_log2(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'loc': resource_group_location,
'nsg': 'nsg1',
'sa': storage_account
})
self.cmd('network watcher configure -g {rg} --locations westus westus2 westcentralus eastus canadaeast --enabled')
self.cmd('network nsg create -g {rg} -n {nsg}')
self.cmd('network watcher flow-log configure -g {rg} --nsg {nsg} --enabled --retention 5 --storage-account {sa}')
@mock.patch('azure.cli.command_modules.vm._actions._get_thread_count', _mock_thread_count)
@ResourceGroupPreparer(name_prefix='cli_test_nw_packet_capture', location='westus')
@AllowLargeResponse()
def test_network_watcher_packet_capture(self, resource_group, resource_group_location):
self.kwargs.update({
'loc': resource_group_location,
'vm': 'vm1',
'capture': 'capture1'
})
self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --authentication-type password --admin-username deploy --admin-password PassPass10!) --nsg {vm} --nsg-rule None')
self.cmd('vm extension set -g {rg} --vm-name {vm} -n NetworkWatcherAgentLinux --publisher Microsoft.Azure.NetworkWatcher')
self.cmd('network watcher packet-capture create -g {rg} --vm {vm} -n {capture} --file-path capture/capture.cap')
self.cmd('network watcher packet-capture show -l {loc} -n {capture}')
self.cmd('network watcher packet-capture stop -l {loc} -n {capture}')
self.cmd('network watcher packet-capture show-status -l {loc} -n {capture}')
self.cmd('network watcher packet-capture list -l {loc}')
self.cmd('network watcher packet-capture delete -l {loc} -n {capture}')
self.cmd('network watcher packet-capture list -l {loc}')
@ResourceGroupPreparer(name_prefix='cli_test_nw_troubleshooting', location='westcentralus')
@StorageAccountPreparer(name_prefix='clitestnw', location='westcentralus')
@AllowLargeResponse()
def test_network_watcher_troubleshooting(self, resource_group, resource_group_location, storage_account):
self.kwargs.update({
'loc': resource_group_location,
'sa': storage_account
})
# set up resource to troubleshoot
self.cmd('storage container create -n troubleshooting --account-name {sa}')
sa = self.cmd('storage account show -g {rg} -n {sa}').get_output_in_json()
self.kwargs['storage_path'] = sa['primaryEndpoints']['blob'] + 'troubleshooting'
self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name GatewaySubnet')
self.cmd('network public-ip create -g {rg} -n vgw1-pip')
self.cmd('network vnet-gateway create -g {rg} -n vgw1 --vnet vnet1 --public-ip-address vgw1-pip --no-wait')
# test troubleshooting
self.cmd('network vnet-gateway wait -g {rg} -n vgw1 --created')
self.cmd('network watcher troubleshooting start --resource vgw1 -t vnetGateway -g {rg} --storage-account {sa} --storage-path {storage_path}')
self.cmd('network watcher troubleshooting show --resource vgw1 -t vnetGateway -g {rg}')
class ServiceEndpointScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='network_service_endpoint_scenario_test', location='westcentralus')
def test_network_service_endpoints(self, resource_group, resource_group_location):
self.kwargs.update({
'policy': 'policy1',
'pd_name': 'storage-def',
'sub': self.get_subscription_id(),
'vnet': 'vnet1',
'subnet': 'subnet1',
'loc': resource_group_location
})
self.cmd('network service-endpoint list -l {loc}')
# test policy CRUD
self.cmd('network service-endpoint policy create -g {rg} -n {policy} --tags test=best',
checks=self.check('tags.test', 'best'))
self.cmd('network service-endpoint policy update -g {rg} -n {policy} --tags test=nest',
checks=self.check('tags.test', 'nest'))
self.cmd('network service-endpoint policy list -g {rg}',
checks=self.check('length(@)', 1))
self.cmd('network service-endpoint policy show -g {rg} -n {policy}',
checks=self.check('tags.test', 'nest'))
self.cmd('network service-endpoint policy delete -g {rg} -n {policy}')
self.cmd('network service-endpoint policy list -g {rg}',
checks=self.check('length(@)', 0))
# test policy definition CRUD
self.cmd('network service-endpoint policy create -g {rg} -n {policy} --tags test=best')
self.cmd('network service-endpoint policy-definition create -g {rg} --policy-name {policy} -n {pd_name} --service Microsoft.Storage --description "Test Def" --service-resources /subscriptions/{sub}', checks=[
self.check("length(serviceResources)", 1),
self.check('service', 'Microsoft.Storage'),
self.check('description', 'Test Def')
])
self.cmd('network service-endpoint policy-definition update -g {rg} --policy-name {policy} -n {pd_name} --description "Better description"',
         checks=self.check('description', 'Better description'))
self.cmd('network service-endpoint policy-definition list -g {rg} --policy-name {policy}',
checks=self.check('length(@)', 1))
self.cmd('network service-endpoint policy-definition show -g {rg} --policy-name {policy} -n {pd_name}',
checks=self.check('description', 'Better description'))
self.cmd('network service-endpoint policy-definition delete -g {rg} --policy-name {policy} -n {pd_name}')
self.cmd('network service-endpoint policy-definition list -g {rg} --policy-name {policy}',
checks=self.check('length(@)', 0))
# create a subnet with the policy
self.cmd('network service-endpoint policy-definition create -g {rg} --policy-name {policy} -n {pd_name} --service Microsoft.Storage --service-resources /subscriptions/{sub}')
self.cmd('network vnet create -g {rg} -n {vnet}')
self.cmd('network vnet subnet create -g {rg} --vnet-name {vnet} -n {subnet} --address-prefix 10.0.0.0/24 --service-endpoints Microsoft.Storage --service-endpoint-policy {policy}',
checks=self.check("contains(serviceEndpointPolicies[0].id, '{policy}')", True))
class NetworkProfileScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='test_network_profile')
def test_network_profile(self, resource_group):
# no create command exists, so there is no end-to-end scenario; testing the path to the service only.
self.cmd('network profile list')
self.cmd('network profile list -g {rg}')
with self.assertRaisesRegexp(SystemExit, '3'):
self.cmd('network profile show -g {rg} -n dummy')
self.cmd('network profile delete -g {rg} -n dummy -y')
class NetworkServiceAliasesScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='test_network_service_aliases')
def test_network_service_aliases(self, resource_group):
self.kwargs.update({
'rg': resource_group
})
self.cmd('network list-service-aliases -l centralus')
self.cmd('network list-service-aliases -l centralus -g {rg}')
class NetworkBastionHostScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='test_network_bastion_host')
def test_network_bastion_host_create(self, resource_group):
self.kwargs.update({
'rg': resource_group,
'vm': 'clivm',
'vnet': 'vnet',
'subnet1': 'AzureBastionSubnet',
'subnet2': 'vmSubnet',
'ip1': 'ip1',
'bastion': 'clibastion'
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet1}')
self.cmd('network vnet subnet create -g {rg} -n {subnet2} --vnet-name {vnet} --address-prefixes 10.0.2.0/24')
self.cmd('network public-ip create -g {rg} -n {ip1} --sku Standard')
self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --vnet-name {vnet} --subnet {subnet2} '
'--admin-password TestPassword11!! --admin-username testadmin --authentication-type password --nsg-rule None')
self.cmd('network bastion create -g {rg} -n {bastion} --vnet-name {vnet} --public-ip-address {ip1}', checks=[
self.check('type', 'Microsoft.Network/bastionHosts'),
self.check('name', '{bastion}')
])
self.cmd('network bastion list')
self.cmd('network bastion list -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('network bastion show -g {rg} -n {bastion}', checks=[
self.check('type', 'Microsoft.Network/bastionHosts'),
self.check('name', '{bastion}')
])
self.cmd('network bastion delete -g {rg} -n {bastion}')
class NetworkVnetLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer()
def test_network_vnet_local_context(self):
self.kwargs.update({
'vnet': self.create_random_name(prefix='vnet-', length=12),
'subnet': self.create_random_name(prefix='subnet-', length=12)
})
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}',
checks=[self.check('newVNet.name', self.kwargs['vnet'])])
self.cmd('network vnet show', checks=[
self.check('name', self.kwargs['vnet'])
])
self.cmd('network vnet subnet show', checks=[
self.check('name', self.kwargs['subnet'])
])
with self.assertRaises(CLIError):
self.cmd('network vnet delete')
with self.assertRaises(CLIError):
self.cmd('network vnet subnet delete')
self.cmd('network vnet subnet delete -n {subnet}')
self.cmd('network vnet delete -n {vnet}')
class NetworkSecurityPartnerProviderScenarioTest(ScenarioTest):
def __init__(self, method_name, config_file=None, recording_dir=None, recording_name=None, recording_processors=None,
replay_processors=None, recording_patches=None, replay_patches=None):
super(NetworkSecurityPartnerProviderScenarioTest, self).__init__(method_name)
self.cmd('extension add -n virtual-wan')
@ResourceGroupPreparer()
def test_network_security_partner_provider(self, resource_group):
self.kwargs.update({
'vwan': 'clitestvwan',
'vhub': 'clitestvhub',
'gateway': 'cligateway',
'name': 'clisecuritypartnerprovider',
'rg': resource_group
})
self.cmd('network vwan create -n {vwan} -g {rg} --type Standard')
self.cmd('network vhub create -g {rg} -n {vhub} --vwan {vwan} --address-prefix 10.5.0.0/16 -l westus --sku Standard')
self.cmd('network vpn-gateway create -g {rg} -n {gateway} --vhub {vhub}')
self.cmd('network security-partner-provider create -n {name} -g {rg} --vhub {vhub} --provider Checkpoint', checks=[
self.check('name', '{name}'),
self.check('securityProviderName', 'Checkpoint')
])
self.cmd('network security-partner-provider show -n {name} -g {rg}', checks=[
self.check('name', '{name}'),
self.check('securityProviderName', 'Checkpoint')
])
self.cmd('network security-partner-provider update -n {name} -g {rg} --tag a=b', checks=[
self.check('tags.a', 'b')
])
self.cmd('network security-partner-provider list -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('network security-partner-provider list', checks=[
self.check('length(@)', 1)
])
self.cmd('network security-partner-provider delete -n {name} -g {rg}')
class NetworkVirtualApplianceScenarioTest(ScenarioTest):
def setUp(self):
super(NetworkVirtualApplianceScenarioTest, self).setUp()
self.cmd('extension add -n virtual-wan')
def tearDown(self):
# avoid influencing other tests when running in parallel
# self.cmd('extension remove -n virtual-wan')
super(NetworkVirtualApplianceScenarioTest, self).tearDown()
@ResourceGroupPreparer(location='westcentralus')
def test_network_virtual_appliance(self, resource_group):
self.kwargs.update({
'vwan': 'clitestvwan',
'vhub': 'clitestvhub',
'name': 'cli-virtual-appliance',
'site': 'cli-site',
'blob': 'https://azurecliprod.blob.core.windows.net/cli-extensions/account-0.1.0-py2.py3-none-any.whl',
'rg': resource_group
})
self.cmd('network vwan create -n {vwan} -g {rg} --type Standard')
self.cmd('network vhub create -g {rg} -n {vhub} --vwan {vwan} --address-prefix 10.5.0.0/16 --sku Standard')
self.cmd('network virtual-appliance create -n {name} -g {rg} --vhub {vhub} --vendor "barracudasdwanrelease" '
'--scale-unit 2 -v latest --asn 10000 --init-config "echo $abc" '
'--boot-blobs {blob} {blob} --cloud-blobs {blob} {blob}',
checks=[
self.check('name', '{name}'),
self.check('length(bootStrapConfigurationBlobs)', 2),
self.check('length(cloudInitConfigurationBlobs)', 2),
self.check('virtualApplianceAsn', 10000),
self.check('cloudInitConfiguration', "echo $abc")
])
self.cmd('network virtual-appliance update -n {name} -g {rg} --asn 20000 --init-config "echo $abcd"', checks=[
self.check('virtualApplianceAsn', 20000),
self.check('cloudInitConfiguration', "echo $abcd")
])
self.cmd('network virtual-appliance show -n {name} -g {rg}', checks=[
self.check('name', '{name}'),
self.check('length(bootStrapConfigurationBlobs)', 2),
self.check('length(cloudInitConfigurationBlobs)', 2),
self.check('virtualApplianceAsn', 20000),
self.check('cloudInitConfiguration', "echo $abcd")
])
self.cmd('network virtual-appliance list -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('network virtual-appliance list', checks=[
self.check('length(@)', 1)
])
self.cmd('network virtual-appliance sku list', checks=[
self.check('length(@)', 4)
])
self.cmd('network virtual-appliance sku show --name "barracudasdwanrelease"', checks=[
self.check('name', 'barracudasdwanrelease')
])
self.cmd('network virtual-appliance site create -n {site} -g {rg} --appliance-name {name} --address-prefix 10.0.0.0/24 --allow --default --optimize', checks=[
self.check('name', '{site}'),
self.check('o365Policy.breakOutCategories.allow', True),
self.check('o365Policy.breakOutCategories.default', True),
self.check('o365Policy.breakOutCategories.optimize', True),
self.check('addressPrefix', '10.0.0.0/24')
])
self.cmd('network virtual-appliance site update -n {site} -g {rg} --appliance-name {name} --address-prefix 10.0.0.1/24 --allow false --default false --optimize false', checks=[
self.check('name', '{site}'),
self.check('o365Policy.breakOutCategories.allow', False),
self.check('o365Policy.breakOutCategories.default', False),
self.check('o365Policy.breakOutCategories.optimize', False),
self.check('addressPrefix', '10.0.0.1/24')
])
self.cmd('network virtual-appliance site show -n {site} -g {rg} --appliance-name {name}', checks=[
self.check('name', '{site}'),
self.check('o365Policy.breakOutCategories.allow', False),
self.check('o365Policy.breakOutCategories.default', False),
self.check('o365Policy.breakOutCategories.optimize', False),
self.check('addressPrefix', '10.0.0.1/24')
])
self.cmd('network virtual-appliance site list -g {rg} --appliance-name {name}', checks=[
# self.check('length(@)', 1)
])
self.cmd('network virtual-appliance site delete -n {site} -g {rg} --appliance-name {name} -y')
self.cmd('network virtual-appliance delete -n {name} -g {rg} -y')
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.15.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, type)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
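# Illustrative sketch (the local name below is hypothetical): a
# MovedAttribute resolves to the object appropriate for the running
# Python major version.
_demo_attr = MovedAttribute("getoutput", "commands", "subprocess")
assert callable(_demo_attr._resolve())
del _demo_attr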
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python 3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
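# Minimal sketch of the PEP 302 finder/loader contract implemented
# above; "_demo_finder" and the module name are hypothetical.
_demo_finder = _SixMetaPathImporter("six_demo")
_demo_finder._add_module(types.ModuleType("six_demo.moves.example"), "moves.example")
assert _demo_finder.find_module("six_demo.moves.example") is _demo_finder
assert _demo_finder.is_package("six_demo.moves.example") is False
del _demo_finder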
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.__func__
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
_assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
def assertNotRegex(self, *args, **kwargs):
return getattr(self, _assertNotRegex)(*args, **kwargs)
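# Hypothetical TestCase sketch: the shims above dispatch to whichever
# assertion method name exists on the running Python version.
import unittest as _unittest
class _AssertShimDemo(_unittest.TestCase):
    def test_order_insensitive(self):
        assertCountEqual(self, [1, 2, 2], [2, 1, 2])
del _AssertShimDemo, _unittest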
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
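# Sketch: capture an exception and re-raise it with the original
# traceback via reraise(); this demo swallows it again immediately.
try:
    try:
        _ = 1 / 0
    except ZeroDivisionError:
        reraise(*sys.exc_info())
except ZeroDivisionError:
    pass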
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
# This does exactly the same what the :func:`py3:functools.update_wrapper`
# function does on Python versions after 3.2. It sets the ``__wrapped__``
# attribute on ``wrapper`` object and it doesn't raise an error if any of
# the attributes mentioned in ``assigned`` and ``updated`` are missing on
# ``wrapped`` object.
def _update_wrapper(wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
continue
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
wrapper.__wrapped__ = wrapped
return wrapper
_update_wrapper.__doc__ = functools.update_wrapper.__doc__
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
return functools.partial(_update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
wraps.__doc__ = functools.wraps.__doc__
else:
wraps = functools.wraps
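# Sketch (hypothetical names): six.wraps mirrors functools.wraps and
# also guarantees __wrapped__ is set, via the backport above on
# pre-3.4 Pythons.
def _wraps_demo(func):
    @wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner

@_wraps_demo
def _greet():
    """Say hello."""

assert _greet.__doc__ == "Say hello."
assert _greet.__wrapped__.__name__ == "_greet"
del _wraps_demo, _greet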
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
if sys.version_info[:2] >= (3, 7):
# This version introduced PEP 560 that requires a bit
# of extra care (we mimic what is done by __build_class__).
resolved_bases = types.resolve_bases(bases)
if resolved_bases is not bases:
d['__orig_bases__'] = bases
else:
resolved_bases = bases
return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
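# Usage sketch (hypothetical names): the temporary class returned by
# with_metaclass replaces itself on instantiation, so _Demo ends up
# with _DemoMeta as its real metaclass on both Python 2 and 3.
class _DemoMeta(type):
    pass

class _Demo(with_metaclass(_DemoMeta, object)):
    pass

assert type(_Demo) is _DemoMeta
del _DemoMeta, _Demo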
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
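# Decorator-form sketch of the same idea, using add_metaclass
# (hypothetical names again).
class _DemoMeta(type):
    pass

@add_metaclass(_DemoMeta)
class _Demo(object):
    pass

assert type(_Demo) is _DemoMeta
del _DemoMeta, _Demo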
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, binary_type):
return s
if isinstance(s, text_type):
return s.encode(encoding, errors)
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
# Optimization: Fast return for the common case.
if type(s) is str:
return s
if PY2 and isinstance(s, text_type):
return s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
return s.decode(encoding, errors)
elif not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
"""
A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
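# Usage sketch (hypothetical name): define __str__ returning text and
# decorate; under Python 2 the decorator installs __unicode__ plus a
# UTF-8-encoding __str__, under Python 3 it is a no-op.
@python_2_unicode_compatible
class _Greeting(object):
    def __str__(self):
        return u"caf\u00e9"

assert text_type(_Greeting()) == u"caf\u00e9"
del _Greeting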
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
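# With the hook installed, lazy imports such as the following resolve
# through _SixMetaPathImporter (illustrative only; they assume this
# module is importable under the name "six"):
#
#     from six.moves import configparser
#     from six.moves.urllib.parse import urlparse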
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 20:31:10 2020
@author: pritz
"""
#import libraries
import pickle
#from flask import Flask, request
#import flasgger
#from flasgger import Swagger
#import numpy as np
#import pandas as pd
import streamlit as st
from PIL import Image
#app=Flask(__name__)
#Swagger(app)
#Unpickling the classifier
pickle_in=open('classifier.pkl','rb')
classifier=pickle.load(pickle_in)
#@app.route('/')
#@app.route('/home')
def welcome_page():
return '<h1> WELCOME TO THE HOMEPAGE </h1>'
#@app.route('/predict',methods=["GET"])
def predict_note_authentication(variance,skewness,curtosis,entropy):
""" Let's Authenticate the Bank Note
This is using docstring for specification.
---
parameters:
- name: variance
in: query
type: number
required: true
- name: skewness
in: query
type: number
required: true
- name: curtosis
in: query
type: number
required: true
- name: entropy
in: query
type: number
required: true
responses:
200:
description: The output values
"""
# variance=request.args.get('variance')
# skewness=request.args.get('skewness')
# curtosis=request.args.get('curtosis')
# entropy=request.args.get('entropy')
prediction=classifier.predict([[variance,skewness,curtosis,entropy]])
print(prediction)
return prediction
def main():
st.title('Bank Authenticator')
html_temp= """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Streamlit Bank Authenticator ML App </h2>
</div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
variance=st.text_input('variance','Type Here')
skewness=st.text_input('skewness','Type Here')
curtosis=st.text_input('curtosis','Type Here')
entropy=st.text_input('entropy','Type Here')
result=""
if st.button("Preict"):
result=predict_note_authentication(variance,skewness,curtosis,entropy)
st.success('The Output is {}'.format(result()))
if st.button("About"):
st.text("Lets LEarn")
st.text("Built with Streamlit")
if __name__=='__main__':
main()
# url for the earlier Flask/flasgger version of the app: http://127.0.0.1:5000/apidocs/
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOW
def tamper(payload, **kwargs):
"""
Replaces each space character (' ') with a pound character ('#') followed by
a new line ('\n'), emitted URL-encoded as '%23%0A'
Requirement:
* MSSQL
* MySQL
Notes:
* Useful to bypass several web application firewalls
>>> tamper('1 AND 9227=9227')
'1%23%0AAND%23%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
retVal += "%23%0A"
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
|
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold
def run():
# Build list of stations
stations = build_station_list()
# Update latest level data for all stations
update_water_levels(stations)
# stations whose relative level exceeds the 0.8 threshold, returned
# as (station, relative level) tuples
stations_over = stations_level_over_threshold(stations, 0.8)
for station_level in stations_over:
print(station_level[0].name, station_level[1])
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
|
'''Unit tests for ckan/logic/auth/create.py.
'''
import mock
import nose
import ckan.model as core_model
import ckan.new_tests.helpers as helpers
import ckan.new_tests.factories as factories
import ckan.logic.auth.create as auth_create
logic = helpers.logic
assert_equals = nose.tools.assert_equals
class TestCreateDatasetAnonymousSettings(object):
def test_anon_cant_create(self):
response = auth_create.package_create({'user': None}, None)
assert_equals(response['success'], False)
@helpers.change_config('ckan.auth.anon_create_dataset', True)
def test_anon_can_create(self):
response = auth_create.package_create({'user': None}, None)
assert_equals(response['success'], True)
@helpers.change_config('ckan.auth.anon_create_dataset', True)
@helpers.change_config('ckan.auth.create_dataset_if_not_in_organization',
False)
def test_cdnio_overrides_acd(self):
response = auth_create.package_create({'user': None}, None)
assert_equals(response['success'], False)
@helpers.change_config('ckan.auth.anon_create_dataset', True)
@helpers.change_config('ckan.auth.create_unowned_dataset', False)
def test_cud_overrides_acd(self):
response = auth_create.package_create({'user': None}, None)
assert_equals(response['success'], False)
class TestCreateDatasetLoggedInSettings(object):
def setup(self):
helpers.reset_db()
def test_no_org_user_can_create(self):
user = factories.User()
response = auth_create.package_create({'user': user['name']}, None)
assert_equals(response['success'], True)
@helpers.change_config('ckan.auth.anon_create_dataset', True)
@helpers.change_config('ckan.auth.create_dataset_if_not_in_organization',
False)
def test_no_org_user_cant_create_if_cdnio_false(self):
user = factories.User()
response = auth_create.package_create({'user': user['name']}, None)
assert_equals(response['success'], False)
@helpers.change_config('ckan.auth.anon_create_dataset', True)
@helpers.change_config('ckan.auth.create_unowned_dataset', False)
def test_no_org_user_cant_create_if_cud_false(self):
user = factories.User()
response = auth_create.package_create({'user': user['name']}, None)
assert_equals(response['success'], False)
def test_same_org_user_can_create(self):
user = factories.User()
org_users = [{'name': user['name'], 'capacity': 'editor'}]
org = factories.Organization(users=org_users)
dataset = {'name': 'same-org-user-can-create', 'owner_org': org['id']}
context = {'user': user['name'], 'model': core_model}
response = auth_create.package_create(context, dataset)
assert_equals(response['success'], True)
def test_different_org_user_cant_create(self):
user = factories.User()
org_users = [{'name': user['name'], 'capacity': 'editor'}]
org1 = factories.Organization(users=org_users)
org2 = factories.Organization()
dataset = {'name': 'different-org-user-cant-create',
'owner_org': org2['id']}
context = {'user': user['name'], 'model': core_model}
response = auth_create.package_create(context, dataset)
assert_equals(response['success'], False)
class TestCreate(object):
def setup(self):
helpers.reset_db()
@mock.patch('ckan.logic.auth.create.group_member_create')
def test_user_invite_delegates_correctly_to_group_member_create(self, gmc):
user = factories.User()
context = {
'user': user['name'],
'model': None,
'auth_user_obj': user
}
data_dict = {'group_id': 42}
gmc.return_value = {'success': False}
nose.tools.assert_raises(logic.NotAuthorized, helpers.call_auth,
'user_invite', context=context, **data_dict)
gmc.return_value = {'success': True}
result = helpers.call_auth('user_invite', context=context, **data_dict)
assert result is True
|
# Some oddities that are perfectly legitimate since Python 3.6
universe_age = 14_000_000_000  # underscores as digit separators
print(universe_age)
# prints 14000000000
# Multiple assignment: three targets in a row (or more)
x, y, z = 14, 'olivier', True
print(x, y, z)
# prints 14 olivier True
MAX_CONNECTIONS = 5000
# A constant is like a variable whose value stays the same throughout the life
# of a program. Python doesn't have built-in constant types, but Python
# programmers use all capital letters to indicate a variable should be treated
# as a constant and never be changed.
# For comparison, C++14 allows apostrophes as digit separators (C++, not Python):
#     int a = 0b0010'0010'1110;
#     int b = 1'962'174;
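# The Python spelling of the same digit-separator idea uses underscores, which
# work in binary and decimal literals alike:
a = 0b0010_0010_1110
b = 1_962_174
print(a, b)
# prints 558 1962174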
|
"""Reference implementation of the CWL standards."""
__author__ = "pamstutz@veritasgenetics.com"
CWL_CONTENT_TYPES = [
"text/plain",
"application/json",
"text/vnd.yaml",
"text/yaml",
"text/x-yaml",
"application/x-yaml",
]
|
from src.model.wall import Wall
WIDTH = 840
HEIGHT = 600
# Base room that contains only the outer walls
base_room = []
padding_base = 20
base_room.append(Wall((padding_base, padding_base), (WIDTH - padding_base, padding_base)))
base_room.append(Wall((padding_base, HEIGHT - padding_base), (WIDTH - padding_base, HEIGHT - padding_base)))
base_room.append(Wall((padding_base, padding_base), (padding_base, HEIGHT - padding_base)))
base_room.append(Wall((WIDTH - padding_base, padding_base), (WIDTH - padding_base, HEIGHT - padding_base)))
def get_base_room():
    # Return a shallow copy so callers can append walls without mutating base_room
    return [wall for wall in base_room]
# Room 1 with a rectangle in the middle
room_1 = get_base_room()
padding = 230
room_1.append(Wall((padding, padding), (WIDTH - padding, padding)))
room_1.append(Wall((padding, HEIGHT - padding), (WIDTH - padding, HEIGHT - padding)))
room_1.append(Wall((padding, padding), (padding, HEIGHT - padding)))
room_1.append(Wall((WIDTH - padding, padding), (WIDTH - padding, HEIGHT - padding)))
# Room 2 with a trapezoid in the middle
room_2 = get_base_room()
room_2.append(Wall((padding * 2, padding), (WIDTH - padding, padding)))
room_2.append(Wall((padding, HEIGHT - padding), (WIDTH - padding, HEIGHT - padding)))
room_2.append(Wall((padding * 2, padding), (padding, HEIGHT - padding)))
room_2.append(Wall((WIDTH - padding, padding), (WIDTH - padding, HEIGHT - padding)))
# Room 3 with an irregular shape
room_3 = get_base_room()
padding = 150
room_3.append(Wall((padding, padding), (WIDTH - padding, padding)))
room_3.append(Wall((padding, padding), (padding, HEIGHT - padding)))
room_3.append(Wall((WIDTH / 2, HEIGHT - padding), (WIDTH - padding, HEIGHT - padding)))
room_3.append(Wall((WIDTH / 2, HEIGHT - padding), (WIDTH / 2, HEIGHT / 2)))
# Room 4: small room
room_4 = []
padding = 30
room_4.append(Wall((padding, padding), (WIDTH/2 - padding, padding)))
room_4.append(Wall((padding, HEIGHT - padding), (WIDTH/2 - padding, HEIGHT - padding)))
room_4.append(Wall((padding, padding), (padding, HEIGHT - padding)))
room_4.append(Wall((WIDTH/2 - padding, padding), (WIDTH/2 - padding, HEIGHT - padding)))
room_5 = get_base_room()
padding = 150
room_5.append(Wall((padding, padding_base), (padding, HEIGHT - padding * 2)))
room_5.append(Wall((WIDTH - padding, padding * 2), (WIDTH - padding, HEIGHT - padding_base)))
room_5.append(Wall((WIDTH / 2, padding), (WIDTH / 2, HEIGHT - padding)))
room_5.append(Wall((padding * 2, HEIGHT / 2), (WIDTH - padding * 2, HEIGHT / 2)))
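# A quick sanity check (a sketch; runs only when this module is executed
# directly and assumes src.model.wall is importable): each room owns an
# independent list, so the appends above never mutate base_room itself.
if __name__ == '__main__':
    for name, room in [('base_room', base_room), ('room_1', room_1),
                       ('room_2', room_2), ('room_3', room_3),
                       ('room_4', room_4), ('room_5', room_5)]:
        print(name, len(room), 'walls')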
|
import os
from aws_scatter_gather.util.sqs_batch_sender import AsyncSqsBatchSender
SCOPE = os.environ.get("SCOPE", "")
PROCESS_QUEUE = "{SCOPE}s3-sqs-lambda-async-chunked-process-queue".format(SCOPE=SCOPE)
def new_batch_sender(sqs_client):
    return AsyncSqsBatchSender(sqs_client, queue_name=PROCESS_QUEUE)
|
__author__ = 'rcj1492'
__created__ = '2017.05'
__license__ = 'MIT'
# initialize logging
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# retrieve schemas
from pocketlab import __module__
from jsonmodel.loader import jsonLoader
fields_schema = jsonLoader(__module__, 'models/lab-fields.json')
cli_schema = jsonLoader(__module__, 'models/lab-cli.json')
# construct fields model
from pocketlab.utils import compile_model
fields_model = compile_model(fields_schema, cli_schema)
# initialize colorama
import colorama
colorama.init()
if __name__ == '__main__':
print(fields_model.keyMap)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.extensions import subnet_service_types
from neutron.tests.unit.db import test_db_base_plugin_v2
class SubnetServiceTypesExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
return subnet_service_types.get_extended_resources(version)
class SubnetServiceTypesExtensionTestPlugin(
db_base_plugin_v2.NeutronDbPluginV2):
"""Test plugin to mixin the subnet service_types extension.
"""
supported_extension_aliases = ["subnet-service-types"]
class SubnetServiceTypesExtensionTestCase(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
"""Test API extension subnet_service_types attributes.
"""
CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8']
IP_VERSION = 4
def setUp(self):
plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' +
'SubnetServiceTypesExtensionTestPlugin')
ext_mgr = SubnetServiceTypesExtensionManager()
super(SubnetServiceTypesExtensionTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _create_service_subnet(self, service_types=None, cidr=None,
network=None, enable_dhcp=False):
if not network:
with self.network() as network:
pass
network = network['network']
if not cidr:
cidr = self.CIDRS[0]
args = {'net_id': network['id'],
'tenant_id': network['tenant_id'],
'cidr': cidr,
'ip_version': self.IP_VERSION,
'enable_dhcp': enable_dhcp}
if service_types:
args['service_types'] = service_types
return self._create_subnet(self.fmt, **args)
def _test_create_subnet(self, service_types, expect_fail=False):
res = self._create_service_subnet(service_types)
if expect_fail:
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
else:
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_create_subnet_blank_type(self):
self._test_create_subnet([])
def test_create_subnet_bar_type(self):
self._test_create_subnet(['network:bar'])
def test_create_subnet_foo_type(self):
self._test_create_subnet(['compute:foo'])
def test_create_subnet_bar_and_foo_type(self):
self._test_create_subnet(['network:bar', 'compute:foo'])
def test_create_subnet_invalid_type(self):
self._test_create_subnet(['foo'], expect_fail=True)
def test_create_subnet_no_type(self):
res = self._create_service_subnet()
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertFalse(subnet['service_types'])
def _test_update_subnet(self, subnet, service_types, expect_fail=False):
data = {'subnet': {'service_types': service_types}}
req = self.new_update_request('subnets', data, subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
if expect_fail:
self.assertEqual('InvalidSubnetServiceType',
res['NeutronError']['type'])
else:
subnet = res['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_update_subnet_zero_to_one(self):
service_types = ['network:foo']
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with a single service type
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_two(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with two service types
service_types.append('compute:bar')
self._test_update_subnet(subnet, service_types)
def test_update_subnet_two_to_one(self):
service_types = ['network:foo', 'compute:bar']
# Create a subnet with two service types
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with one service type
service_types = ['network:foo']
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_zero(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with zero service types
service_types = []
self._test_update_subnet(subnet, service_types)
def test_update_subnet_invalid_type(self):
service_types = ['foo']
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with an invalid service type
self._test_update_subnet(subnet, service_types, expect_fail=True)
def _assert_port_res(self, port, service_type, subnet, fallback,
error='IpAddressGenerationFailureNoMatchingSubnet'):
res = self.deserialize('json', port)
if fallback:
port = res['port']
self.assertEqual(1, len(port['fixed_ips']))
self.assertEqual(service_type, port['device_owner'])
self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
else:
self.assertEqual(error, res['NeutronError']['type'])
def test_create_port_with_matching_service_type(self):
with self.network() as network:
pass
matching_type = 'network:foo'
non_matching_type = 'network:bar'
# Create a subnet with no service types
self._create_service_subnet(network=network)
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[2],
network=network)
# Create a subnet with a service type to match the port device owner
res = self._create_service_subnet([matching_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Create a port with device owner matching the correct service subnet
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=matching_type)
self._assert_port_res(port, matching_type, service_subnet, True)
def test_create_port_without_matching_service_type(self, fallback=True):
with self.network() as network:
pass
subnet = ''
matching_type = 'compute:foo'
non_matching_type = 'network:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[1],
network=network)
# Create a port with device owner not matching the service subnet
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=matching_type)
self._assert_port_res(port, matching_type, subnet, fallback)
def test_create_port_without_matching_service_type_no_fallback(self):
self.test_create_port_without_matching_service_type(fallback=False)
def test_create_port_no_device_owner(self, fallback=True):
with self.network() as network:
pass
subnet = ''
service_type = 'compute:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a service_type
self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
# Create a port without a device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'])
self._assert_port_res(port, '', subnet, fallback)
def test_create_port_no_device_owner_no_fallback(self):
self.test_create_port_no_device_owner(fallback=False)
def test_create_port_exhausted_subnet(self, fallback=True):
with self.network() as network:
pass
subnet = ''
service_type = 'compute:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a service_type
res = self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Update the service subnet with empty allocation pools
data = {'subnet': {'allocation_pools': []}}
req = self.new_update_request('subnets', data, service_subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
# Create a port with a matching device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=service_type)
self._assert_port_res(port, service_type, subnet, fallback,
error='IpAddressGenerationFailure')
def test_create_port_exhausted_subnet_no_fallback(self):
self.test_create_port_exhausted_subnet(fallback=False)
def test_create_dhcp_port_compute_subnet(self, enable_dhcp=True):
with self.network() as network:
pass
res = self._create_service_subnet(['compute:nova'],
network=network,
enable_dhcp=enable_dhcp)
subnet = self.deserialize('json', res)['subnet']
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
fixed_ips=[{'subnet_id': subnet['id']}],
device_owner='network:dhcp')
self._assert_port_res(port, 'network:dhcp', subnet, enable_dhcp)
def test_create_dhcp_port_compute_subnet_no_dhcp(self):
self.test_create_dhcp_port_compute_subnet(enable_dhcp=False)
class SubnetServiceTypesExtensionTestCasev6(
SubnetServiceTypesExtensionTestCase):
CIDRS = ['2001:db8:2::/64', '2001:db8:3::/64', '2001:db8:4::/64']
IP_VERSION = 6
|
"""A simple parser for extracting some meaning out of a code cell
The parser walks through the code coming from the kernel and separates it into
SQL code and magic commands.
The SQL code is passed further by the kernel to the MariaDB client for
execution.
The magic objects created here are invoked in the kernel to perform
their duties.
"""
# Copyright (c) MariaDB Foundation.
# Distributed under the terms of the Modified BSD License.
from mariadb_kernel.maria_magics.magic_factory import MagicFactory
class CodeParser:
def __init__(self, log, cell_code, delimiter):
self.code = cell_code
self.magics = []
self.sql = []
self.log = log
self.delimiter = delimiter
self.magic_factory = MagicFactory(log)
self._parse()
def get_sql(self):
return self.sql
def get_magics(self):
return self.magics
def _is_magic(self, code):
return code.startswith("%")
def _is_line_magic(self, code):
return code.startswith("%") and not code.startswith("%%")
def _is_cell_magic(self, code):
return code.startswith("%%")
def _parse(self):
split = self.code.split("\n", maxsplit=1)
first_line = split[0].lstrip()
if self._is_magic(first_line):
magic = first_line.split(" ", maxsplit=1)
magic_code = ""
args = ""
if len(magic) > 1:
args = magic[1]
if self._is_line_magic(first_line):
magic_cmd = magic[0][1:]
magic_obj = self.magic_factory.create_magic(magic_cmd, args)
self.magics.append(magic_obj)
elif self._is_cell_magic(first_line):
magic_cmd = magic[0][2:]
if len(split) > 1:
magic_code = split[1].strip()
magic_obj = self.magic_factory.create_magic(
magic_cmd, {"args": args, "code": magic_code}
)
self.magics.append(magic_obj)
return
code = self.code.strip()
if not code.endswith(self.delimiter):
raise ValueError(
f"Your SQL code doesn't end with delimiter `{self.delimiter}`"
)
self.sql.append(code)
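# A minimal usage sketch (assumes the mariadb_kernel package is importable and
# an ordinary logging.Logger is at hand): plain SQL ending with the delimiter
# lands in get_sql(), while a leading % or %% line becomes a magic object.
if __name__ == "__main__":
    import logging
    parser = CodeParser(logging.getLogger(__name__), "SELECT 1;", ";")
    print(parser.get_sql())     # ['SELECT 1;']
    print(parser.get_magics())  # []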
|
# Generated by Django 3.1.5 on 2021-02-10 12:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('application_evaluator', '0009_applicationroundattachment'),
]
operations = [
migrations.DeleteModel(
name='Comment',
),
]
|
#! /usr/bin/env python3
# MIT License
#
#Copyright 2020 Filipe Teixeira
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
# from cpd import Partial_Dependence
# Workaround: execute cpd.py directly rather than importing it as a package
exec(open('../cpd.py', 'r').read())
data = pd.read_csv('compound_activity.csv')
y = data['Activity'].to_numpy()
X = pd.get_dummies(data.drop('Activity',axis=1))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75)
# This is a very crude model without any hyper-parameter optimization
rf_model = RandomForestRegressor()
rf_model.fit(X_train, y_train)
print(f"Score on the Train Set: {rf_model.score(X_train,y_train):6.4f}")
print(f"Score on the Test Set: {rf_model.score(X_test,y_test):6.4f}")
print(X_train)
pd_data = Partial_Dependence(rf_model, X_train, real_features=[f"Spec_{x}" for x in range(250,300)])
print(pd_data)
pd_data.plot()
|
from os import path
from kubetools.constants import (
GIT_BRANCH_ANNOTATION_KEY,
GIT_COMMIT_ANNOTATION_KEY,
GIT_TAG_ANNOTATION_KEY,
)
from kubetools.deploy.util import run_shell_command
from kubetools.exceptions import KubeBuildError
def _is_git_committed(app_dir):
git_status = run_shell_command(
'git', 'status', '--porcelain',
cwd=app_dir,
).strip().decode()
    return not git_status  # empty porcelain output means a clean working tree
def _get_git_info(app_dir):
git_annotations = {}
commit_hash = run_shell_command(
'git', 'rev-parse', '--short=7', 'HEAD',
cwd=app_dir,
).strip().decode()
git_annotations[GIT_COMMIT_ANNOTATION_KEY] = commit_hash
branch_name = run_shell_command(
'git', 'rev-parse', '--abbrev-ref', 'HEAD',
cwd=app_dir,
).strip().decode()
if branch_name != 'HEAD':
git_annotations[GIT_BRANCH_ANNOTATION_KEY] = branch_name
try:
git_tag = run_shell_command(
'git', 'tag', '--points-at', commit_hash,
cwd=app_dir,
).strip().decode()
except KubeBuildError:
pass
else:
if git_tag:
git_annotations[GIT_TAG_ANNOTATION_KEY] = git_tag
return commit_hash, git_annotations
def get_git_info(app_dir, ignore_git_changes=False):
if path.exists(path.join(app_dir, '.git')):
if not _is_git_committed(app_dir) and not ignore_git_changes:
raise KubeBuildError(f'{app_dir} contains uncommitted changes, refusing to deploy!')
return _get_git_info(app_dir)
raise KubeBuildError(f'{app_dir} is not a valid git repository!')
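# A minimal usage sketch (assumes the current working directory is a clean git
# checkout): returns the short commit hash plus annotations suitable for
# attaching to Kubernetes objects.
if __name__ == '__main__':
    commit, annotations = get_git_info('.')
    print(commit, annotations)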
|
import subprocess
import uuid
import time
import socket
import os
import json
import pytest
import requests
import threading
import boto3
from pytest_localserver.http import WSGIServer
SYMBOLICATOR_BIN = [os.environ.get("SYMBOLICATOR_BIN") or "target/debug/symbolicator"]
AWS_ACCESS_KEY_ID = os.environ.get("SENTRY_SYMBOLICATOR_TEST_AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("SENTRY_SYMBOLICATOR_TEST_AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = "us-east-1"
GCS_PRIVATE_KEY = os.environ.get("SENTRY_SYMBOLICATOR_GCS_PRIVATE_KEY")
GCS_CLIENT_EMAIL = os.environ.get("SENTRY_SYMBOLICATOR_GCS_CLIENT_EMAIL")
session = requests.session()
@pytest.fixture
def random_port():
def inner():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
return inner
@pytest.fixture
def background_process(request):
def inner(*args, **kwargs):
p = subprocess.Popen(*args, **kwargs)
request.addfinalizer(p.kill)
return p
return inner
class Service:
def __init__(self, process, port):
self.process = process
self.port = port
@property
def url(self):
return f"http://127.0.0.1:{self.port}"
def request(self, method, path, **kwargs):
assert path.startswith("/")
return session.request(method, self.url + path, **kwargs)
def post(self, path, **kwargs):
return self.request("post", path, **kwargs)
def get(self, path, **kwargs):
return self.request("get", path, **kwargs)
def wait_http(self, path):
backoff = 0.1
while True:
try:
self.get(path).raise_for_status()
break
except Exception:
time.sleep(backoff)
if backoff > 10:
raise
backoff *= 2
def wait_healthcheck(self):
self.wait_http("/healthcheck")
class Symbolicator(Service):
pass
@pytest.fixture
def symbolicator(tmpdir, request, random_port, background_process):
def inner(**config_data):
config = tmpdir.join("config")
port = random_port()
bind = f"127.0.0.1:{port}"
config_data["bind"] = bind
config_data["logging"] = {"level": "debug"}
config_data.setdefault("connect_to_reserved_ips", True)
if config_data.get("cache_dir"):
config_data["cache_dir"] = str(config_data["cache_dir"])
config.write(json.dumps(config_data))
process = background_process(SYMBOLICATOR_BIN + ["-c", str(config), "run"])
return Symbolicator(process=process, port=port)
return inner
class HitCounter:
def __init__(self, url, hits):
self.url = url
self.hits = hits
self.before_request = None
@pytest.fixture
def hitcounter():
errors = []
hits = {}
hitlock = threading.Lock()
rv = None
def app(environ, start_response):
if rv.before_request:
rv.before_request()
try:
path = environ["PATH_INFO"]
with hitlock:
hits.setdefault(path, 0)
hits[path] += 1
if path.startswith("/redirect/"):
path = path[len("/redirect") :]
start_response("302 Found", [("Location", path)])
return [b""]
elif path.startswith("/msdl/"):
path = path[len("/msdl/") :]
with requests.get(
f"https://msdl.microsoft.com/download/symbols/{path}",
allow_redirects=False, # test redirects with msdl
) as r:
start_response(f"{r.status_code} BOGUS", list(r.headers.items()))
return [r.content]
elif path.startswith("/respond_statuscode/"):
statuscode = int(path.split("/")[2])
start_response(f"{statuscode} BOGUS", [])
return [b""]
elif path.startswith("/garbage_data/"):
start_response("200 OK", [])
return [b"bogus"]
else:
raise AssertionError("Bad path: {}".format(path))
except Exception as e:
errors.append(e)
start_response("500 Internal Server Error", [])
return [b"error"]
server = WSGIServer(application=app, threaded=True)
server.start()
rv = HitCounter(url=server.url, hits=hits)
yield rv
server.stop()
for error in errors:
raise error
@pytest.fixture
def s3():
if not AWS_ACCESS_KEY_ID or not AWS_SECRET_ACCESS_KEY:
pytest.skip("No AWS credentials")
return boto3.resource(
"s3",
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
)
@pytest.fixture
def s3_bucket_config(s3):
bucket_name = f"symbolicator-test-{uuid.uuid4()}"
s3.create_bucket(Bucket=bucket_name)
yield {
"type": "s3",
"bucket": bucket_name,
"access_key": AWS_ACCESS_KEY_ID,
"secret_key": AWS_SECRET_ACCESS_KEY,
"region": AWS_REGION_NAME,
}
s3.Bucket(bucket_name).objects.all().delete()
s3.Bucket(bucket_name).delete()
@pytest.fixture
def ios_bucket_config():
if not GCS_PRIVATE_KEY or not GCS_CLIENT_EMAIL:
pytest.skip("No GCS credentials")
yield {
"id": "ios",
"type": "gcs",
"bucket": "sentryio-system-symbols",
"private_key": GCS_PRIVATE_KEY,
"client_email": GCS_CLIENT_EMAIL,
"prefix": "/ios",
}
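# A minimal illustration (hypothetical; deliberately not named test_* so it is
# never collected) of how the fixtures above compose: spawn a symbolicator
# process and block until its healthcheck endpoint responds.
def example_symbolicator_usage(symbolicator):
    service = symbolicator()
    service.wait_healthcheck()
    assert service.get("/healthcheck").ok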
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for converter module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import loader
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.platform import test
class TestConverter(converter.Base):
pass
class ConversionOptionsTest(converter_testing.TestCase):
def test_to_ast(self):
opts = converter.ConversionOptions()
opts_ast = opts.to_ast()
template = '''
def test_fn():
return opts_ast
'''
opts_packed = templates.replace(template, opts_ast=opts_ast)
reparsed, _, _ = loader.load_ast(opts_packed)
reparsed.__dict__['ag__'] = self.make_fake_mod(
'fake_ag', converter.ConversionOptions, converter.Feature)
reparsed_opts = reparsed.test_fn()
self.assertEqual(opts.recursive, reparsed_opts.recursive)
self.assertEqual(opts.user_requested, False)
self.assertEqual(
opts.internal_convert_user_code,
reparsed_opts.internal_convert_user_code)
self.assertEqual(opts.optional_features, reparsed_opts.optional_features)
class ConverterBaseTest(converter_testing.TestCase):
def test_get_definition_directive_basic(self):
directive_key = object
def test_fn():
a = 1
return a
ns = {}
node, ctx = self.prepare(test_fn, ns)
symbol_a = node.body[1].value
defs, = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
defs.directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
'other_arg': parser.parse_expression('bar'),
}
c = TestConverter(ctx)
value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
None)
self.assertEqual(value.id, 'foo')
def test_get_definition_directive_default(self):
directive_key = object
def test_fn():
a = 1
return a
ns = {}
node, ctx = self.prepare(test_fn, ns)
symbol_a = node.body[1].value
c = TestConverter(ctx)
value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
parser.parse_expression('default'))
self.assertEqual(value.id, 'default')
def test_get_definition_directive_multiple_consistent(self):
directive_key = object
def test_fn():
a = 1
if a:
a = 2
return a
ns = {}
node, ctx = self.prepare(test_fn, ns)
symbol_a = node.body[2].value
defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
defs[0].directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
'other_arg': parser.parse_expression('bar'),
}
defs[1].directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
'other_arg': parser.parse_expression('baz'),
}
c = TestConverter(ctx)
value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
None)
self.assertEqual(value.id, 'foo')
def test_get_definition_directive_multiple_inconsistent(self):
directive_key = object
def test_fn():
a = 1
if a:
a = 2
return a
ns = {}
node, ctx = self.prepare(test_fn, ns)
symbol_a = node.body[2].value
defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
defs[0].directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
}
defs[1].directives[directive_key] = {
'test_arg': parser.parse_expression('bar'),
}
c = TestConverter(ctx)
with self.assertRaises(ValueError):
c.get_definition_directive(symbol_a, directive_key, 'test_arg', None)
if __name__ == '__main__':
test.main()
|
# isochrones.py
# Ben Cook (bcook@cfa.harvard.edu)
"""Define the Isocrhone_Model class"""
import numpy as np
import pandas as pd
import os
import glob
import sys
from warnings import warn
from pkg_resources import resource_filename
##########################
# Useful Utilities
def load_MIST_dir(dir_path, iso_append='.iso.cmd'):
df = pd.DataFrame()
for MIST_doc in glob.glob(os.path.join(dir_path, '*'+iso_append)):
try:
with open(MIST_doc, 'r') as f:
lines = [f.readline() for _ in range(13)]
colnames = lines[-1].strip('#\n').split()
assert ('EEP' in colnames)
dtypes = {c: float for c in colnames}
dtypes['EEP'] = int
new_df = pd.read_table(MIST_doc, names=colnames,
comment='#', delim_whitespace=True,
dtype=dtypes, na_values=['Infinity'])
new_df[new_df.isna()] = 100.
df = df.append([new_df], ignore_index=True)
except Exception:
warn('File not properly formatted: %s' % (MIST_doc))
sys.exit(1)
return df
def _interp_arrays(arr1, arr2, f):
"""Linearly interpolate between two (potentially unequal length) arrays
Arguments:
arr1 -- first (lower) array (len N1 or N1xD)
arr2 -- second (upper) array (len N2 or N2xD, N2 doesn't have to equal N1)
f -- linear interpolation fraction (float between 0 and 1)
Output: interpolated array (len max(N1,N2) or max(N1,N2)xD)
"""
assert (arr1.ndim == arr2.ndim), (
"The two interpolated arrays must have same dimensions")
l1, l2 = len(arr1), len(arr2)
# If arrays are unequal length, extrapolate shorter using trend of longer
if (l1 < l2):
delta = arr2[l1:] - arr2[l1-1]
added = arr1[-1] + delta
arr1 = np.append(arr1, added, axis=0)
elif (l1 > l2):
delta = arr1[l2:] - arr1[l2-1]
added = arr2[-1] + delta
arr2 = np.append(arr2, added, axis=0)
return (1-f)*arr1 + f*arr2
def _feh_from_str(feh_str):
"""Converts a metallicity value to MIST string
Example Usage:
_feh_from_str("m0.53") -> -0.53
_feh_from_str("p1.326") -> 1.326
Arguments:
feh_str -- metallicity (as a string)
Output: float value of metallicity
"""
value = float(feh_str[1:])
if feh_str[0] == 'm':
value *= -1
elif feh_str[0] != 'p':
raise ValueError('feh string not of valid format')
return value
def _feh_to_str(feh):
"""Converts a metallicity value to MIST string
Example Usage:
_feh_to_str(-0.5313) -> "m0.53"
_feh_to_str(1.326) -> "p1.33"
Arguments:
feh -- metallicity (float)
Output: string representing metallicity
"""
result = ''
if (feh < 0):
result += 'm'
else:
result += 'p'
result += '%1.2f' % (np.abs(feh))
return result
def _interp_df_by_mass(df, dm_min):
ages = np.unique(df.age.values)
fehs = np.unique(df['[Fe/H]_init'].values)
new_rows = []
for age in ages:
for feh in fehs:
iso_df = df[np.isclose(df.age, age) & np.isclose(df['[Fe/H]_init'], feh)]
            # add more points until the desired spacing is reached
mass = iso_df.initial_mass.values
frac_dm = np.diff(mass) / mass[:-1]
id_too_large = np.where(frac_dm > dm_min)[0]
for i_max in id_too_large:
                # add 5 extra points at fractions 0.1-0.9 between the two rows
row_low = iso_df.iloc[i_max]
row_high = iso_df.iloc[i_max + 1]
for f in np.linspace(0.1, 0.9, 5):
new_rows.append(f*row_low + (1-f)*row_high)
df = df.append(pd.DataFrame(new_rows))
return df
class Isochrone_Model:
"""Models Isochrones (IMF, and magnitudes in particular Filters) using
linear interpolation of MIST models
    An Isochrone_Model incorporates a collection of MIST models, and
allows for interpolating the IMF and magnitudes (for given Filter
objects) at any arbitrary metallicity and mass
Attributes:
    MIST_df -- A pandas DataFrame containing all pre-computed MIST datapoints
ages -- An array of ages (in log years) which are valid for the model
Methods:
get_magnitudes -- Pass a Galaxy_Model object, return IMF and magnitudes
for each mass, age, metallicity bin
Constructors:
__init__ -- Pass a list of Filter objects, path to MIST model files,
and array of metallicities.
"""
def __init__(self, filters, MIST_path=None, iso_append=".iso.cmd",
rotating=False,
mag_system='vega', dm_interp=-1):
"""Creates a new Isochrone_Model, given a list of Filter objects
Arguments:
filters -- list of Filter objects
Keyword Arguments:
MIST_path -- directory containing MIST model files
        rotating -- whether to load the rotating MIST models (used when
            MIST_path is None)
        mag_system -- default magnitude system: 'vega', 'ab', or 'st'
        dm_interp -- (deprecated) fractional mass spacing threshold for manual
            interpolation; disabled when negative
"""
# Locate MIST files
if MIST_path is None:
if rotating:
MIST_path = resource_filename('pcmdpy', 'isochrones/MIST_v1.2_rot/')
else:
MIST_path = resource_filename('pcmdpy', 'isochrones/MIST_v1.2/')
# Import all MIST model files into Pandas dataframe
self.num_filters = len(filters)
# Use optional conversions from VEGA to AB or ST, etc
self.conversions = {}
self.conversions['vega'] = np.zeros(len(filters), dtype=float)
self.conversions['ab'] = np.array([f._zpts['ab'] - f._zpts['vega']
for f in filters])
        self.conversions['st'] = np.array([f._zpts['st'] - f._zpts['vega']
                                           for f in filters])
self.default_system = mag_system.lower()
assert self.default_system in self.conversions.keys(), (
"the given mag_system is not valid. Please choose one of: "
"['vega', 'ab', 'st']")
self.filters = filters
self.filter_names = [f.tex_name for f in self.filters]
# load all MIST files found in directory
if isinstance(MIST_path, str):
self.MIST_df = load_MIST_dir(MIST_path, iso_append=iso_append)
elif isinstance(MIST_path, list):
merge_cols = ['[Fe/H]_init', 'EEP', 'log10_isochrone_age_yr']
self.MIST_df = pd.DataFrame(columns=merge_cols)
# Merge multiple filter sets
for pth in MIST_path:
df_temp = load_MIST_dir(pth, iso_append=iso_append)
self.MIST_df = self.MIST_df.merge(df_temp,
how='outer', on=merge_cols,
suffixes=['', '_y'])
self.MIST_df.drop(
[c for c in self.MIST_df.columns if c.endswith('_y')],
axis=1, inplace=True)
self._feh_arr = np.array(sorted(self.MIST_df['[Fe/H]_init'].unique()))
self.MIST_df.rename(columns={'log10_isochrone_age_yr': 'age',
'[Fe/H]_init': 'feh'},
inplace=True)
# This is deprecated
if dm_interp > 0.:
print('starting manual interpolation')
self.MIST_df = _interp_df_by_mass(self.MIST_df, dm_interp)
print('done with interpolation')
self.MIST_df = self.MIST_df.sort_values(by=['feh', 'age',
'initial_mass'])
self.MIST_df = self.MIST_df.reset_index(drop=True)
self.ages = self.MIST_df.age.unique()
        # The MIST columns that will be interpolated (initial mass, current
        # mass, EEP, and all input filters)
self._interp_cols = ['initial_mass', 'star_mass', 'EEP']
for f in self.filters:
c = f.MIST_column
c_alt = f.MIST_column_alt
if c in self.MIST_df.columns:
self._interp_cols.append(c)
elif c_alt in self.MIST_df.columns:
self._interp_cols.append(c_alt)
else:
print((c, c_alt))
raise ValueError('Filter does not have a valid MIST_column')
self.MIST_gb = self.MIST_df.groupby(['age', 'feh'])[self._interp_cols]
def get_isochrone(self, age, feh, downsample=5, mag_system=None):
"""Interpolate MIST isochrones for given age and metallicity
Arguments:
        age -- log10 of the isochrone age, in years
        feh -- metallicity [Fe/H] of the isochrone
        downsample -- keep only every Nth point along the isochrone
        mag_system -- magnitude system for the output ('vega', 'ab', or 'st';
            defaults to the model's default system)
        Output:
        mags -- 2D array of magnitudes (DxN, where D is number of filters
            the model was initialized with)
        imass -- array of initial masses (N)
        cmass -- array of current masses (N)
        eep -- array of equivalent evolutionary points (N)
        """
mag_system = mag_system or self.default_system
mag_system = mag_system.lower()
if mag_system not in self.conversions.keys():
warn(('mag_system {0:s} not in list of magnitude '
'conversions. Reverting to Vega'.format(mag_system)))
conversions = self.conversions['vega']
else:
conversions = self.conversions[mag_system]
# Find closest age in MIST database
age = self.ages[np.abs(self.ages - age).argmin()]
nearest_feh = self._feh_arr[np.abs(self._feh_arr - feh).argmin()]
if np.isclose(nearest_feh, feh, atol=0.01):
feh = nearest_feh
inter = self.MIST_gb.get_group((age, feh)).values
# Interpolate/extrapolate for other metallicities
else:
if feh < np.min(self._feh_arr):
fehlow = np.min(self._feh_arr)
else:
fehlow = np.max(self._feh_arr[self._feh_arr <= feh])
if feh > np.max(self._feh_arr):
fehhigh = np.max(self._feh_arr)
else:
fehhigh = np.min(self._feh_arr[self._feh_arr >= feh])
frac_between = (feh - fehlow) / np.abs(fehhigh - fehlow)
if (frac_between >= 2) or (frac_between <= -1):
raise ValueError('Extrapolating metallicity more than one '
'entire metallicity bin')
valslow = self.MIST_gb.get_group((age, fehlow)).values
valshigh = self.MIST_gb.get_group((age, fehhigh)).values
inter = _interp_arrays(valslow, valshigh, frac_between)
initial_mass = inter[::downsample, 0]
current_mass = inter[::downsample, 1]
eep = inter[::downsample, 2]
mags = (inter[::downsample, 3:] + conversions).T
return mags, initial_mass, current_mass, eep
def model_galaxy(self, galaxy, lum_cut=np.inf, mag_system=None,
downsample=5, return_mass=False):
weights = np.empty((1, 0), dtype=float)
magnitudes = np.empty((self.num_filters, 0), dtype=float)
initial_mass = np.empty((1, 0), dtype=float)
current_mass = np.empty((1, 0), dtype=float)
eeps = np.empty((1, 0), dtype=float)
# Collect the isochrones from each bin
for age, feh, sfh, d_mod in galaxy.iter_SSPs():
mags, i_mass, c_mass, eep = self.get_isochrone(
age, feh, mag_system=mag_system, downsample=downsample)
imf = galaxy.imf_func(i_mass, **galaxy.imf_kwargs)
weights = np.append(weights, imf*sfh)
mags += d_mod
magnitudes = np.append(magnitudes, mags, axis=-1)
initial_mass = np.append(initial_mass, i_mass)
current_mass = np.append(current_mass, c_mass)
eeps = np.append(eeps, eep)
if not np.isinf(lum_cut):
lum = np.power(10., -0.4*magnitudes)
mean_lum = np.average(lum, weights=weights, axis=1)
to_keep = (lum.T / mean_lum >= lum_cut).sum(axis=1) == 0
weights = weights[to_keep]
magnitudes = magnitudes[:, to_keep]
initial_mass = initial_mass[to_keep]
current_mass = current_mass[to_keep]
eeps = eeps[to_keep]
if return_mass:
return weights, magnitudes, initial_mass, current_mass, eeps
else:
return weights, magnitudes
def get_stellar_mass(self, galaxy, downsample=5):
imf, _, _, c_mass, _ = self.model_galaxy(galaxy,
downsample=downsample,
return_mass=True)
return (imf * c_mass).sum()
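# A small self-check of the helper functions above (a sketch with made-up
# values; runs only when this module is executed directly):
if __name__ == '__main__':
    a = np.array([0., 2., 4.])
    b = np.array([2., 4., 6., 8.])  # unequal lengths: the shorter is extended
    print(_interp_arrays(a, b, 0.5))  # -> [1. 3. 5. 7.]
    assert _feh_from_str('m0.53') == -0.53
    assert _feh_to_str(1.326) == 'p1.33'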
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RVarselrf(RPackage):
"""Variable Selection using Random Forests.
Variable selection from random forests using both backwards variable
elimination (for the selection of small sets of non-redundant variables)
and selection based on the importance spectrum (somewhat similar to scree
plots; for the selection of large, potentially highly-correlated
variables). Main applications in high-dimensional data (e.g., microarray
data, and other genomics and proteomics applications)."""
cran = "varSelRF"
version('0.7-8', sha256='719487fb560cb4733816bafe4cbc958a132674825e3b9d4f82ce8f2003cd8940')
depends_on('r@2.0.0:', type=('build', 'run'))
depends_on('r-randomforest', type=('build', 'run'))
|
# python3
"""Definitions for Python AST objects.
AST nodes are not very convenient for comparing two files. Instead, they should
be parsed into these definitions. Each Definition subclass exposes the
attributes of a particular kind of AST node.
"""
from typing import ClassVar, Dict, Optional, List, Union
import dataclasses
from typed_ast import ast3
@dataclasses.dataclass
class Definition:
"""Base class for AST definitions.
Definition tracks the basic information of AST nodes, namely the identifier
and location of the node. The location is useful for reporting errors.
Attributes:
name: The node's identifier.
source: The source file that contains the definition.
lineno: The line in the source file that contains the definition.
col_offset: The column offset within the line of the source definition.
"""
name: str
source: str
lineno: int
col_offset: int
kind: ClassVar[str]
def __post_init__(self):
        # __post_init__ is called automatically by the dataclass-generated
        # __init__; it derives full_name ("{kind} {name}") and location here.
self.full_name = f"{self.kind} {self.name}"
self.location = f"{self.source}:{self.lineno}"
@dataclasses.dataclass
class Argument(Definition):
"""Represents an argument in a function definition.
While Argument records the existence of a default value, it does not track the
actual default value. The value is not needed for checking type stubs.
Attributes:
has_default: Whether a default value is provided for the argument.
"""
has_default: bool
kind: ClassVar[str] = "argument"
@classmethod
def from_node(cls, node: ast3.arg, source: str = ""):
return cls(node.arg, source, node.lineno, node.col_offset, False)
@dataclasses.dataclass
class Function(Definition):
"""Represents a function definition.
Example function definition:
@some_dec
def f(a, b=1, *c, d, e=2, **f): ...
See Attributes below for which parts of the definition correspond to
attributes in this class.
Attributes:
params: The list of positional parameters. e.g. [a, b]
vararg: The variable-length argument. e.g. c
kwonlyargs: The list of keyword-only arguments. e.g. [d, e]
kwarg: The keyword argument. e.g. f
decorators: The list of names of decorators on this function.
e.g. ["some_dec"]
is_async: Tracks whether this function is marked async.
"""
params: List[Argument]
vararg: Optional[Argument]
kwonlyargs: List[Argument]
kwarg: Optional[Argument]
decorators: List[str]
is_async: bool
kind: ClassVar[str] = "function"
@classmethod
def from_node(cls, node: Union[ast3.FunctionDef, ast3.AsyncFunctionDef],
source: str = ""):
"""Transform an AST function node into a Function definition.
Arguments:
node: The ast3.FunctionDef or ast3.AsyncFunctionDef to transform.
source: (Optional) The source file of this definition.
Returns:
A Function derived from the given AST node.
"""
params = [Argument.from_node(arg) for arg in node.args.args]
# If there are n parameters and m default arguments, m <= n, then each param
# in params[n-m:n] has a default argument.
for i in range(1, len(node.args.defaults)+1):
params[-i].has_default = True
kwonlyargs = [Argument.from_node(arg) for arg in node.args.kwonlyargs]
# kw_defaults has an entry for each keyword-only argument, with either None
# or the parsed value.
for i, default in enumerate(node.args.kw_defaults):
kwonlyargs[i].has_default = default is not None
if node.args.vararg:
vararg = Argument.from_node(node.args.vararg)
else:
vararg = None
if node.args.kwarg:
kwarg = Argument.from_node(node.args.kwarg)
else:
kwarg = None
    # Decorators are expressions, but we only care about their names; any
    # decorator without a recoverable name (which shouldn't happen) is dropped.
decorators = _find_all_names(node.decorator_list)
return cls(name=node.name,
source=source,
lineno=node.lineno,
col_offset=node.col_offset,
params=params,
vararg=vararg,
kwonlyargs=kwonlyargs,
kwarg=kwarg,
decorators=decorators,
is_async=isinstance(node, ast3.AsyncFunctionDef))
@dataclasses.dataclass
class Variable(Definition):
"""Represents a variable definition.
Variables that appear in type stubs are either global variables or class
attributes.
"""
kind: ClassVar[str] = "variable"
@classmethod
def from_node(cls, node: ast3.Name, source: str = ""):
return cls(node.id, source, node.lineno, node.col_offset)
@dataclasses.dataclass
class Class(Definition):
"""Represents a class.
Attributes:
bases: List of base classes by name.
keyword_bases: Dictionary of key to class name. This is used for constructs
like Python 3's metaclasses: class A(metaclass=B): ...
decorators: List of decorators on this class by name.
fields: List of class and method attributes. Type stubs do not differentiate
between the two types.
methods: List of functions defined in the class.
nested_classes: List of nested classes.
"""
bases: List[str]
keyword_bases: Dict[str, str]
decorators: List[str]
fields: Dict[str, List[Variable]]
methods: Dict[str, List[Function]]
nested_classes: Dict[str, List["Class"]]
kind: ClassVar[str] = "class"
@classmethod
def from_node(cls, node: ast3.ClassDef, fields: List[Variable],
methods: List[Function], nested_classes: List["Class"],
source: str = ""):
"""Transform an ast3.ClassDef into a Class definition.
Due to the complexity of parsing classes, the caller must provide the
attributes of the class separately.
Arguments:
node: The ClassDef itself.
fields: List of fields in the class.
methods: List of functions defined in the class.
nested_classes: List of nested classes defined in the class.
source: (Optional) The source file of this definition.
Returns:
A Class definition representing the ast3.ClassDef and its attributes.
"""
bases = _find_all_names(node.bases)
keyword_bases = _find_all_names(node.keywords)
decorators = _find_all_names(node.decorator_list)
return cls(name=node.name,
source=source,
lineno=node.lineno,
col_offset=node.col_offset,
bases=bases,
keyword_bases=keyword_bases,
decorators=decorators,
fields=fields,
methods=methods,
nested_classes=nested_classes)
def _find_name(root: ast3.AST) -> Optional[str]:
"""Returns the first "name" field or ast3.Name expression in the AST."""
for node in ast3.walk(root):
if isinstance(node, ast3.Name):
return node.id
if isinstance(node, ast3.Attribute):
# value.attr, e.g. self.x. We want attr.
return node.attr
if isinstance(node, ast3.Subscript):
# value[slice], e.g. x[1] or x[1:2]. We want value.
return _find_name(node.value)
name = getattr(node, "name", None)
if name:
return name
return None
def _find_all_names(nodes: List[ast3.AST]) -> List[str]:
"""Finds every name in a list of nodes. Ignores nodes with no name."""
names = (_find_name(node) for node in nodes)
return [name for name in names if name]
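# A minimal usage sketch (requires the typed_ast package): parse a small
# function and build a Function definition from the resulting AST node.
if __name__ == "__main__":
    tree = ast3.parse("def f(a, b=1): ...")
    fn = Function.from_node(tree.body[0], source="<example>")
    print(fn.full_name, fn.location,
          [(p.name, p.has_default) for p in fn.params])
    # -> function f <example>:1 [('a', False), ('b', True)]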
|
import pandas as pd
import numpy as np
import copy
import pickle
from datetime import datetime,timedelta
import os
import multiprocessing
import random
data_path = '../data/'
TEST = True
if TEST:
    df = pd.read_csv(data_path + '201407_new.csv', sep=';')
else:
    # Read all 27 monthly extracts (2014-07 through 2016-09) and concatenate
    monthly = pd.period_range('2014-07', '2016-09', freq='M')
    df = pd.concat([pd.read_csv(data_path + '{}_new.csv'.format(m.strftime('%Y%m')),
                                sep=';')
                    for m in monthly])
df = df.loc[:,['csc_phy_id', 'txn_subtype_co']].drop_duplicates()
df = df.drop_duplicates(['csc_phy_id'])
df.to_csv(data_path + 'sample_card_type.csv',index=False)
# process_data(935962579)
print(pd.unique(df['txn_subtype_co']))
|
from urllib.parse import urlparse
import pandas as pd
FILENAME = "LC_URLHAUS_Domains_List.txt"
url="https://urlhaus.abuse.ch/downloads/text/"
domains = pd.read_csv(url, skiprows=9, names=['url'], error_bad_lines=False, warn_bad_lines=False)
domains['source'] = ' urlhaus'  # leading space pads the value after the ':' output separator
domains['url'] = domains['url'].apply(lambda url: urlparse(url).netloc.split(':')[0])
domains = domains.drop_duplicates(subset='url')
domains.to_csv(FILENAME, sep=':', encoding='utf-8', header=False, index=False)
|
from flask import Blueprint, render_template, url_for
from simplex import (
SimplexPage, SIMUI, SimplexLayout
)
index_view_bp = Blueprint('index_view', __name__)
@index_view_bp.route('/')
def index():
_sidebar_items = [
SimplexLayout.SimplexSidebarItem().add_content(
SIMUI.SIMUIListItem()
.set_name('Some Header')
.set_type('label')
.set_selected(False)
.get_tag_content()
),
SimplexLayout.SimplexSidebarItem().add_content(
SIMUI.SIMUIListItem()
.set_name('Manage Data')
.set_type('href')
.set_href(url_for('data_view.manage_data'))
.set_selected(True)
.get_tag_content()
)]
page = SimplexPage.SimplexStandardPage()
page.sidebar.items = _sidebar_items
return render_template('index.html', content=page.render())
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
# coding: utf-8
import os
class NoDataError(Exception):
"""historyやindicatorのデータにアクセスした際に、取得対象のデータが存在しない場合に発生する例外です。
データが存在しない場合は「データが足りていない(※1)」や「範囲外へのアクセス(※2)」などがあります。
※1: 25日移動平均線の場合、1から24日目まではデータがないので集計できません。
※2: 明日以降などのデータにはアクセスできないようになっています。"""
pass
class OrderTypeError(Exception):
"""予期せぬorder_typeが見つかったときに発生する例外です。プログラムにバグがあるので修正する必要があります。"""
def __init__(self, order_type):
"""コンストラクタ
Args:
order_type: プログラム中で出現した未知の注文種別
"""
super().__init__('未知のorder_type [{}] が見つかりました。プログラムを修正してください。'
.format(order_type))
class CommandError(Exception):
"""コマンド失敗時に発生する例外です。各コマンドはこの例外を受けたら処理を停止し、
画面にメッセージを表示します。"""
pass
class DuplicateFindkeyError(CommandError): # 継承OK?
"""FinderMixinを継承したクラスのクラス変数__findekey__が重複している場合に発生する例外です。
この例外を見たら_findkeyの重複を解決する必要があります。"""
def __init__(self, rule_type, findkey):
"""コンストラクタ
Args:
rule_type: findkeyの種別(indicaotrsやconditionsなど)
findkey: クラスを一意に特定するためのキー
"""
super().__init__('{}のfindkey: [{}] が重複しています。'.format(rule_type, findkey))
class NewInstanceError(CommandError): # 継承OK?
"""インスタンス生成に失敗した場合に発生する例外です。
大抵はルールファイルを見直す必要があります。"""
def __init__(self, msg=None, rule_type=None, findkey=None, classname=None, err=None):
"""コンストラクタ
Args:
msg: メッセージ
rule_type: findkeyの種別(indicaotrsやconditionsなど)
findkey: クラスを一意に特定するためのキー
classname: クラス名
err: 発生した例外
※abstractmethodをオーバーライドしていない場合や、
設定ファイルの引数のミスなどで発生した例外がセットされています。
引数のミスに関しては、プログラム側でチェックして、
予期せぬ例外が発生しないようにした方が原因が判明しやすくなります。
なお、引数をチェックする場合はArgumentErrorをご利用ください。
"""
msgarr = []
msgarr.append('インスタンスの生成に失敗しました。')
if rule_type:
msgarr.append('rule_type: {}'.format(rule_type))
if findkey:
msgarr.append('findkey: {}'.format(findkey))
if classname:
msgarr.append('クラス名: {}'.format(classname))
if msg:
msgarr.append(str(msg))
if err:
msgarr.append('例外: {}'.format(err))
super().__init__(os.linesep.join(msgarr))
class ArgumentError(CommandError):
"""引数に誤りがあった場合に発生する例外です。"""
def __init__(self, name, strtype=None):
"""
Args:
name: 引数の名前
strtype: 引数の正しい型(文字列)
"""
msg = '引数[{}]が指定されていないか、型が正しくありません。'.format(name)
if strtype is not None:
msg += '型は[{}]で指定してください。'.format(strtype)
super().__init__(msg)
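# A minimal demonstration (a sketch; runs only when this module is executed
# directly) of how these exceptions render their messages:
if __name__ == '__main__':
    try:
        raise ArgumentError('period', strtype='int')
    except CommandError as e:
        print(e)  # Argument [period] is missing or has an incorrect type. Specify the type as [int].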
|
"""
Polizei Brandenburg: App
    Polizei Brandenburg news plus flood, traffic, and forest-fire warnings  # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import copy
import logging
import multiprocessing
import sys
from http import client as http_client
import urllib3
from deutschland.polizei_brandenburg.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
"multipleOf",
"maximum",
"exclusiveMaximum",
"minimum",
"exclusiveMinimum",
"maxLength",
"minLength",
"pattern",
"maxItems",
"minItems",
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:param disabled_client_side_validations (string): Comma-separated list of
JSON schema validation keywords to disable JSON schema structural validation
rules. The following keywords may be specified: multipleOf, maximum,
exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern,
maxItems, minItems.
By default, the validation is performed for data generated locally by the client
and data received from the server, independent of any validation performed by
the server side. If the input data does not satisfy the JSON schema validation
rules specified in the OpenAPI document, an exception is raised.
If disabled_client_side_validations is set, structural validation is
disabled. This can be useful to troubleshoot data validation problem, such as
when the OpenAPI document validation rules do not match the actual API data
received by the server.
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
:param ssl_ca_cert: str - the path to a file of concatenated CA certificates
in PEM format
"""
_default = None
def __init__(
self,
host=None,
api_key=None,
api_key_prefix=None,
access_token=None,
username=None,
password=None,
discard_unknown_keys=False,
disabled_client_side_validations="",
server_index=None,
server_variables=None,
server_operation_index=None,
server_operation_variables=None,
ssl_ca_cert=None,
):
"""Constructor"""
self._base_path = (
"https://polizei.brandenburg.de/ipa_api" if host is None else host
)
"""Default Base url
"""
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
"""Default server index
"""
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
"""Default server variables
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.access_token = access_token
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.disabled_client_side_validations = disabled_client_side_validations
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("polizei_brandenburg")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = "%(asctime)s %(levelname)s %(message)s"
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to False to skip verifying the SSL certificate when calling the
API over HTTPS.
"""
self.ssl_ca_cert = ssl_ca_cert
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ""
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Enable client side validation
self.client_side_validation = True
# Options to pass down to the underlying urllib3 socket
self.socket_options = None
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ("logger", "logger_file_handler"):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == "disabled_client_side_validations":
s = set(filter(None, value.split(",")))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError("Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns a new Configuration object created with the default
constructor, or a copy of the default configuration previously passed
to the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If a logging file is set, attach a file handler to every logger.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on http_client debug
http_client.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off http_client debug
http_client.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(
identifier, self.api_key.get(alias) if alias is not None else None
)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(basic_auth=username + ":" + password).get(
"authorization"
)
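# For example (hypothetical credentials), username "user" and password "pass"
# produce the header value "Basic dXNlcjpwYXNz", i.e.
# urllib3.util.make_headers(basic_auth="user:pass")["authorization"].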
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return (
"Python SDK Debug Report:\n"
"OS: {env}\n"
"Python Version: {pyversion}\n"
"Version of the API: 1.0.0\n"
"SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version)
)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
"url": "https://polizei.brandenburg.de/ipa_api",
"description": "No description provided",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:param servers: an array of host settings or None
:return: URL based on host settings
"""
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers))
)
url = server["url"]
# go through variables and replace placeholders
for variable_name, variable in server.get("variables", {}).items():
used_value = variables.get(variable_name, variable["default_value"])
if "enum_values" in variable and used_value not in variable["enum_values"]:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, used_value, variable["enum_values"]
)
)
url = url.replace("{" + variable_name + "}", used_value)
return url
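# Example of templated host resolution (hypothetical server entry, not part
# of this client's settings):
#
#     servers = [{
#         "url": "https://{region}.example.com/api",
#         "variables": {"region": {"default_value": "eu",
#                                  "enum_values": ["eu", "us"]}},
#     }]
#     self.get_host_from_settings(0, variables={"region": "us"},
#                                 servers=servers)
#     # -> "https://us.example.com/api"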
@property
def host(self):
"""Return generated host."""
return self.get_host_from_settings(
self.server_index, variables=self.server_variables
)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To create a valid debug settings.py we need to intentionally pollute our
# file with all of the content found in the master configuration.
from tempfile import TemporaryDirectory
from .. import * # noqa F403
# Debug is always on when running in debug mode
DEBUG = True
# Allowed hosts is not required in debug mode
ALLOWED_HOSTS = []
# A temporary directory to work in for unit testing
APPRISE_CONFIG_DIR = TemporaryDirectory().name
# Setup our runner
TEST_RUNNER = 'core.settings.pytest.runner.PytestTestRunner'
|
import pytest
@pytest.fixture
def parser():
from math_expression_parser import Parser
return Parser()
@pytest.mark.parametrize('test_input, expected', [
('3 + 8', '3 8 +'),
('( 3 + 4 * 2 / ( 1 - 5 ) ^ 2 ^ 3 )', '3 4 2 * 1 5 - 2 3 ^ ^ / +'),
])
def test_infix_to_rpn(test_input, expected, parser):
assert parser.infix_to_rpn(test_input) == expected
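# The second case above is the classic shunting-yard example: assuming the
# standard precedence rules with a right-associative '^', the operators are
# emitted in the order *, -, ^, ^, /, +, exactly the RPN token order asserted.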
|
"""
Perform inviscid drag minimization of initially rectangular wing with respect to
the chord distribution, subject to a lift and reference area constraint. Similar
to the twist optimization, the expected result from lifting line theory should
produce an elliptical lift distribution. Check the output directory for Tecplot
solution files.
"""
import numpy as np
import openmdao.api as om
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
# Instantiate the problem and the model group
prob = om.Problem()
# Define flight variables as independent variables of the model
indep_var_comp = om.IndepVarComp()
indep_var_comp.add_output("v", val=248.136, units="m/s") # Freestream Velocity
indep_var_comp.add_output("alpha", val=5.0, units="deg") # Angle of Attack
indep_var_comp.add_output("beta", val=0.0, units="deg") # Sideslip angle
indep_var_comp.add_output("omega", val=np.zeros(3), units="deg/s") # Rotation rate
indep_var_comp.add_output("Mach_number", val=0.0) # Freestream Mach number
indep_var_comp.add_output("re", val=1.0e6, units="1/m") # Freestream Reynolds number
indep_var_comp.add_output("rho", val=0.38, units="kg/m**3") # Freestream air density
indep_var_comp.add_output("cg", val=np.zeros((3)), units="m") # Aircraft center of gravity
# Add vars to model, promoting is a quick way of automatically connecting inputs
# and outputs of different OpenMDAO components
prob.model.add_subsystem("flight_vars", indep_var_comp, promotes=["*"])
# Create a dictionary to store options about the surface
mesh_dict = {
"num_y": 35,
"num_x": 11,
"wing_type": "rect",
"symmetry": True,
"span": 10.0,
"chord": 1,
"span_cos_spacing": 1.0,
"chord_cos_spacing": 1.0,
}
# Generate half-wing mesh of rectangular wing
mesh = generate_mesh(mesh_dict)
# Define input surface dictionary for our wing
surface = {
# Wing definition
"name": "wing", # name of the surface
"type": "aero",
"symmetry": True, # if true, model one half of wing
# reflected across the plane y = 0
"S_ref_type": "projected", # how we compute the wing area,
# can be 'wetted' or 'projected'
"chord_cp": np.ones(3), # Define chord using 3 B-spline cp's
# distributed along span
"mesh": mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
"CL0": 0.0, # CL of the surface at alpha=0
"CD0": 0.0, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
"k_lam": 0.05, # percentage of chord with laminar
# flow, used for viscous drag
"t_over_c": 0.12, # thickness over chord ratio (NACA0015)
"c_max_t": 0.303, # chordwise location of maximum (NACA0015)
# thickness
"with_viscous": False, # if true, compute viscous drag,
"with_wave": False,
} # end of surface dictionary
name = surface["name"]
# Add geometry to the problem as the name of the surface.
# These groups are responsible for manipulating the geometry of the mesh,
# in this case the spanwise chord distribution.
geom_group = Geometry(surface=surface)
prob.model.add_subsystem(name, geom_group)
# Create the aero point group for this flight condition and add it to the model
aero_group = AeroPoint(surfaces=[surface], rotational=True)
point_name = "aero_point_0"
prob.model.add_subsystem(
point_name, aero_group, promotes_inputs=["v", "alpha", "beta", "omega", "Mach_number", "re", "rho", "cg"]
)
# Connect the mesh from the geometry component to the analysis point
prob.model.connect(name + ".mesh", point_name + "." + name + ".def_mesh")
# Perform the connections with the modified names within the
# 'aero_states' group.
prob.model.connect(name + ".mesh", point_name + ".aero_states." + name + "_def_mesh")
# Set optimizer as model driver
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options["debug_print"] = ["nl_cons", "objs", "desvars"]
# Setup problem and add design variables, constraint, and objective
prob.model.add_design_var("alpha", lower=-10.0, upper=15.0)
prob.model.add_design_var("wing.chord_cp", lower=1e-3, upper=5.0)
prob.model.add_constraint(point_name + ".wing_perf.CL", equals=0.5)
prob.model.add_constraint(point_name + ".wing.S_ref", equals=10.0)
prob.model.add_objective(point_name + ".wing_perf.CD", scaler=1e4)
# Set up the problem
prob.setup()
# Run optimization
prob.run_driver()
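# Hypothetical post-processing sketch (the variable paths below follow the
# OpenAeroStruct naming used in this script; adjust if your version differs):
print("CD =", prob["aero_point_0.wing_perf.CD"][0])
print("optimized chord_cp =", prob["wing.chord_cp"])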
|
from __future__ import annotations
import os
import random
from typing import Match
from bot.config import Config
from bot.data import command
from bot.data import format_msg
NO_QUOTES = '@{} sorry, there are no quotes :('
FILES_WITH_NO_QUOTES = set()
@command('!quote')
async def cmd_quote(config: Config, match: Match[str]) -> str:
filenames = [
filename for filename in os.listdir('logs')
if filename not in FILES_WITH_NO_QUOTES
]
for filename in random.sample(filenames, len(filenames)):
full_filename = os.path.join('logs', filename)
quote = random_quote(config, match, full_filename)
if quote:
return format_msg(match, f'"{quote}"')
FILES_WITH_NO_QUOTES.add(filename)
return format_msg(match, NO_QUOTES.format(match['user']))
def random_quote(config: Config, match: Match[str], filename: str) -> str:
with open(filename) as f:
user_logs = [
log for log in f.readlines()
if username(log) != config.username
]
messages = [message(log) for log in user_logs]
quotes = [
message for message in messages
if not message.startswith('!')
]
if not quotes:
return ''
return random.choice(quotes)
def username(log: str) -> str:
return log[log.index('<') + 1: log.index('>')]
def message(log: str) -> str:
return log.partition(' ')[2][:-1]
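# Log-line format assumed by the helpers above (hypothetical content): a line
# such as '<viewer42> great stream!\n' yields username(...) == 'viewer42'
# and message(...) == 'great stream!'.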
|
# BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from patrickstar.core import PSPreProcessCtx, PatrickStarClient
from patrickstar.core.memtracer import RuntimeMemTracer
from patrickstar.utils import logger, log_dist
from .engine import PatrickStarEngine
import time
DEFAULT_CHUNK_SIZE = 32 * 1024 * 1024
def initialize_engine(model_func, local_rank, config=None, client=None):
"""Initialize the PatrickStar Engine.
Arguments:
model_func: Required: nn.module class before apply any wrappers
client: Required: PatrickStarClient for orchestrating chunks.
config: Optional: config json for optimizer.
Returns:
A tuple of ``engine`` and ``optimizer``
* ``engine``: PatrickStar runtime engine which wraps the client model for distributed training.
* ``optimizer``: Wrapped optimizer if a user defined ``optimizer`` is supplied, or if
optimizer is specified in json config else ``None``.
"""
if isinstance(model_func, torch.nn.Module):
logger.debug(
"Passing nn.Module into initialize_engine. "
"Make sure you have intialized the model within PSPreProcessCtx"
)
assert client is not None, "Must pass the client when passing a nn.Module."
model = model_func
else:
assert callable(model_func), "model_func needs to be callable."
if config is None:
default_chunk_size = DEFAULT_CHUNK_SIZE
release_after_init = False
use_cpu_embedding = True
else:
default_chunk_size = config.get("default_chunk_size", DEFAULT_CHUNK_SIZE)
release_after_init = config.get("release_after_init", False)
use_cpu_embedding = config.get("use_cpu_embedding", True)
client = PatrickStarClient(
rank=local_rank,
default_chunk_size=default_chunk_size,
config=config.get("client", None),
)
start_time = time.time()
log_dist("begin initialize the model parameters...")
with PSPreProcessCtx(
client=client,
dtype=torch.float,
release_after_init=release_after_init,
use_cpu_embedding=use_cpu_embedding,
):
model = model_func()
end_time = time.time()
log_dist(
f"finished initialized the model parameters... {end_time - start_time} s"
)
engine = PatrickStarEngine(model=model, client=client, config=config)
client.start_mem_tracer()
return (engine, engine.optimizer)
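# A minimal usage sketch (hypothetical model and config values; only
# initialize_engine itself is taken from this module):
#
#     import torch.nn as nn
#
#     def model_func():
#         return nn.Sequential(nn.Linear(16, 16), nn.ReLU())
#
#     engine, optimizer = initialize_engine(
#         model_func, local_rank=0,
#         config={"default_chunk_size": 32 * 1024 * 1024})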
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubsOperations:
"""VirtualHubsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> "_models.VirtualHub":
"""Retrieves the details of a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.VirtualHub",
**kwargs
) -> "_models.VirtualHub":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'VirtualHub')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.VirtualHub",
**kwargs
) -> AsyncLROPoller["_models.VirtualHub"]:
"""Creates a VirtualHub resource if it doesn't exist else updates the existing VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to create or update VirtualHub.
:type virtual_hub_parameters: ~azure.mgmt.network.v2018_11_01.models.VirtualHub
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.TagsObject",
**kwargs
) -> "_models.VirtualHub":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.VirtualHub"]:
"""Updates VirtualHub tags.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to update VirtualHub tags.
:type virtual_hub_parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ListVirtualHubsResult"]:
"""Lists all the VirtualHubs in a resource group.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.ListVirtualHubsResult"]:
"""Lists all the VirtualHubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualHubs'} # type: ignore
|
"""
@name: cluster_perturbation_validation.py
@description:
Compare differences in edge weights across datasets to find an appropriate noise level
@author: Christopher Brittin
@email: "cabrittin"+ <at>+ "gmail"+ "."+ "com"
@date: 2019-12-05
"""
import sys
sys.path.append(r'./preprocess')
import os
from configparser import ConfigParser,ExtendedInterpolation
import argparse
import networkx as nx
import numpy as np
import ioaux
import matplotlib.pyplot as plt
import matplotlib as mpl
from tqdm import tqdm
from itertools import combinations
import multiprocessing_on_dill as mp
import time
from scipy.stats import norm
from connectome.load import from_db
from connectome.load import reference_graphs
#from connectome.format_graphs import *
from connectome.format_graphs import consensus_graph,filter_graph_edge,normalize_edge_weight
from measures import probability_dist
from cluster_population_models import *
mpl.rcParams['xtick.labelsize'] = 5
mpl.rcParams['ytick.labelsize'] = 5
#CONFIG = os.environ['CONFIG']
CONFIG = 'configs/config.ini'
def plot_log(V,perturbed=False):
_V = np.log(V)
mu = np.mean(_V[:,0])
std = np.std(_V[:,0])
__V = _V - mu
_V = (_V - mu) / std
Z = np.zeros([V.shape[0],2])
Z[:,0] = _V[:,0]
Z[:,1] = np.std(_V[:,1:],axis=1)
print(mu,std)
#mu = np.mean(Z[np.where(Z[:,1]<1)[0],1])
mu = np.mean(Z[:,1])
#mu = np.median(Z[:,1])
fig,_ax = plt.subplots(1,3,figsize=(5.4,1.8))
FS = 7
ax = _ax[0]
ax.hist(__V[:,0],bins=60,range=(-3,3),density=True)
x = np.linspace(-5,5,100)
y = norm(0,std)
ax.plot(x,y.pdf(x),'r-',linewidth=1)
ax.set_xlim([-3,3])
ax.set_ylim([0,0.6])
ax.set_yticks([0,0.3,0.6])
ax.set_xlabel('log of membrane contact area',fontsize=FS)
ax.set_ylabel('Frequency',fontsize=FS)
ax.set_title(r'$\mathbb{M}^4$ contacts',fontsize=FS)
if perturbed: ax.set_title(r'$\widetilde{\mathbb{M}}^4$ contacts',fontsize=FS)
ax.text(0.5,0.8,'STD$=%1.2f$'%std,transform=ax.transAxes,fontsize=6)
ax = _ax[1]
ax.scatter(Z[:,0],Z[:,1],s=1)
ax.set_ylim([0,2])
ax.set_yticks([0,1,2])
#ax.axhline(1,color='r',linestyle='dashed')
ax.set_xlabel('log of mean',fontsize=FS)
ax.set_ylabel('Standard deviation',fontsize=FS)
ax.set_title('Variability across 4 datasets',fontsize=FS)
ax = _ax[2]
ax.hist(Z[:,1],bins=40,range=(0,2))
ax.set_ylim([0,150])
ax.set_yticks([0,50,100,150])
ax.set_ylabel("# of contacts sites",fontsize=FS)
ax.set_xlabel("Standard deviation",fontsize=FS)
ax.axvline(mu,linestyle='dashed',color='r')
ax.text(0.4,0.8,"Mean$=%1.2f$"%mu,transform=ax.transAxes,fontsize=6)
#plt.savefig('results/cluster_revision/noise_distribution.svg')
plt.tight_layout()
#plt.show()
def plot_linear(V):
Z = np.zeros([V.shape[0],2])
Z[:,0] = V[:,0]
Z[:,1] = np.std(V[:,1:],axis=1)
#idx = np.where(Z[:,0] < 40000)[0]
#Z = Z[idx,:]
#p = np.polyfit(Z[:,0],Z[:,1],1)
#print(p)
fig,ax = plt.subplots(1,1,figsize=(5,5))
x = np.linspace(0,Z.max(),10)
ax.scatter(Z[:,0],Z[:,1]/Z[:,0],s=2)
#ax.plot(x,x,'r-')
#ax.plot(x,x*p[0] + p[1],'g-')
#mx = Z.max()
ax.set_xlim([0,Z[:,0].max()])
#ax.set_ylim([0,Z[:,1].max()])
ax.set_xlabel('Mean contact area')
ax.set_ylabel('Std/mean contact area')
plt.tight_layout()
#plt.savefig('results/cluster_revision/linear_quotient_noise.svg')
#mu = np.mean(Z[np.where(Z[:,1]<1.2)])
#mu = np.mean(Z[:,1])
#fig,_ax = plt.subplots(1,3,figsize=(7.5,2))
#FS = 10
def plot_noise(_ax,V):
vmax = np.max(V[:,1:])
ax = _ax[0]
for i in range(2,5):
ax.scatter(V[:,1]/vmax,V[:,i]/vmax,s=2,color='b')
z = np.linspace(0,1,11)
ax.plot(z,z,'r-')
ax.set_xlim([0,1])
ax.set_ylim([0,1])
ax.set_xlabel('Equivalent contact',fontsize=10)
ax.set_ylabel('L4 left',fontsize=10)
ax.set_title('Membrane contact areas')
ax = _ax[1]
for i in range(2,5):
y = np.log(np.true_divide(V[:,1],V[:,i]))
x = V[:,1] / np.max(V[:,1])
ax.scatter(x,y,s=2,color='b')
ax.axhline(0,color='r',linewidth=1)
ax.set_xlim([0,1])
ax.set_ylim([-2,2])
ax.set_xlabel('L4 left',fontsize=10)
ax.set_ylabel('log(Equivalent/L4 left)',fontsize=10)
ax.set_title('Membrane contact areas')
plt.tight_layout()
def run(_cfg,fout=None,source_data=None):
wrn = """NOTE: This script pulls directly from the sql databases.
If these databases are not setup then this script will crash"""
print(wrn)
cfg = ConfigParser(interpolation=ExtendedInterpolation())
cfg.read(_cfg)
left = ioaux.read.into_list(cfg['mat']['left_nodes'])
right = ioaux.read.into_list(cfg['mat']['right_nodes'])
lrmap = ioaux.read.into_lr_dict(cfg['mat']['lrmap'])
nodes = ioaux.read.into_list(cfg['mat']['nodes'])
remove = ioaux.read.into_list(cfg['mat']['remove'])
edge_thresh = cfg.getint('params','lower_weight_threshold')
dbs = cfg['input']['databases'].split(',')
G = []
for d in dbs:
D = from_db(d,adjacency=True,chemical=True,electrical=True,remove=remove,dataType='networkx')
D.A = filter_graph_edge(D.A,pct=edge_thresh)
D.split_left_right(left,right)
D.map_right_graphs(lrmap)
G.append(D)
H = [G[0].Al,G[0].Ar,G[1].Al,G[1].Ar]
for g in H: normalize_edge_weight(g)
M = nx.Graph()
consensus_graph(M,H,4,nodes,weight=['weight','wnorm'])
sd = []
V = np.zeros([M.number_of_edges(),5])
for (i,(u,v,w)) in enumerate(M.edges.data('wnorm')):
V[i,0] = w
tmp = [0,0,0,0]
for (j,h) in enumerate(H):
V[i,j+1] = h[u][v]['wnorm']
tmp[j] = h[u][v]['wnorm']
sd.append([u,v,w] + tmp)
ioaux.write.from_list('source_data/ed_fig5_emperical.csv',sd)
lscale = get_log_scale(cfg)
sig = 0.23
_gsizes,Mp,Hp = perturb_data(cfg,['N2U','JSH'],lscale,sig,spatial_domain=0)
sd = []
Vp = np.zeros([Mp.number_of_edges(),5])
for (i,(u,v,w)) in enumerate(Mp.edges.data('wnorm')):
Vp[i,0] = w
tmp = [0,0,0,0]
for (j,h) in enumerate(Hp):
Vp[i,j+1] = h[u][v]['wnorm']
tmp[j] = h[u][v]['wnorm']
sd.append([u,v,w] + tmp)
ioaux.write.from_list('source_data/ed_fig5_pert.csv',sd)
plot_log(V)
#plt.savefig('results/cluster_revision/noise_distribution.svg')
plot_log(Vp,perturbed=True)
#plt.savefig('results/cluster_revision/noise_distribution_perturbed.svg')
#fig,_ax = plt.subplots(1,2,figsize=(5,2))
#plot_noise(_ax,V)
#fig,_ax = plt.subplots(1,2,figsize=(5,2))
#plot_noise(_ax,Vp)
fig,_ax = plt.subplots(1,2,figsize=(3.6,1.8))
x = np.zeros([M.number_of_edges(),2])
for (i,(u,v,w)) in enumerate(M.edges.data('wnorm')):
x[i,0] = w
if Mp.has_edge(u,v): x[i,1] = Mp[u][v]['wnorm']
ax = _ax[0]
ax.scatter(x[:,0]/np.max(x),x[:,1]/np.max(x),s=1)
z = np.linspace(0,1,11)
ax.plot(z,z,'r-')
ax.set_xlim([0,1])
ax.set_ylim([0,1])
ax.set_xlabel('Empirical',fontsize=7)
ax.set_ylabel('Perturbed',fontsize=7)
ax.set_title('Membrane contact areas',fontsize=7)
ax = _ax[1]
x[:,1] = np.log(np.true_divide(x[:,1],x[:,0]))
ax.scatter(x[:,0]/np.max(x[:,0]),x[:,1],s=1)
ax.axhline(0,color='r',linewidth=1)
ax.set_xlim([0,1])
ax.set_ylim([-2,2])
ax.set_xlabel('Empirical',fontsize=7)
ax.set_ylabel('log(Perturbed/Empirical)',fontsize=7)
ax.set_title('Membrane contact areas',fontsize=7)
plt.tight_layout()
#plt.savefig('results/cluster_revision/empirical_vs_perturbed.svg')
plt.show()
if __name__=="__main__":
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-c','--config',
dest = 'config',
action = 'store',
default = CONFIG,
required = False,
help = 'Config file')
params = parser.parse_args()
run(params.config)
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from builtins import * # NOQA
standard_library.install_aliases() # NOQA
import basetest_dqn_like as base
from basetest_training import _TestBatchTrainingMixin
from chainerrl.agents import SARSA
class TestSARSAOnDiscreteABC(
_TestBatchTrainingMixin, base._TestDQNOnDiscreteABC):
def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
return SARSA(
q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
replay_start_size=100, target_update_interval=100)
class TestSARSAOnContinuousABC(
_TestBatchTrainingMixin, base._TestDQNOnContinuousABC):
def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
return SARSA(
q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
replay_start_size=100, target_update_interval=100)
class TestSARSAOnDiscretePOABC(
_TestBatchTrainingMixin, base._TestDQNOnDiscretePOABC):
def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
return SARSA(
q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
replay_start_size=100, target_update_interval=100,
recurrent=True)
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
import unittest
import torch
from lempa import sum_product
from lempa import spmat
from torch.autograd import Variable
class TestDecoderModel(unittest.TestCase):
def test_sum_product_decoding(self):
filename = 'data/3x6irRegLDPC/parity_check_matrix.spmat'
pcm = spmat.read_spmat(filename)
codedir = 'data/3x6irRegLDPC'
code = sum_product.Code(parity_check_matrix=pcm)
num_of_iteration = 3
model = sum_product.NeuralSumProductModel(
code, num_of_iteration,
variable_node_normalization=False,
check_node_normalization=False)
y = torch.Tensor([
[1.620803, 0.264281, -0.031637, -0.127654, 0.746347, 1.003543]
for _ in range(5)])
var = 0.794328
llr = Variable(2 * y / var)
output = model(llr)[-1].data
expected = torch.Tensor([
[4.3974, 1.6925, 1.7111, 1.7111, 2.0840, 2.7033]
for _ in range(5)
])
eps = 1e-3
self.assertTrue((torch.abs(output - expected) < eps).all())
if __name__ == '__main__':
unittest.main()
|
# Copyright 2019 The Blueqat Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The version of blueqat."""
__version__ = "0.4.9-dev"
|
class Cookie:
# Constructor
def __init__(self, name, shape, chips='Chocolate'):
# Instance attributes
self.name = name
self.shape = shape
self.chips = chips
# Instance method: self refers to the instance the method is called on
def bake(self):
print(f'This {self.name} is being baked with the shape {self.shape} and chips of {self.chips}')
print('Enjoy your cookie!')
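# Example usage (hypothetical values):
cookie = Cookie('Snickerdoodle', 'round', chips='Cinnamon')
cookie.bake()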
|
# Make sure to run these two commands before this script:
# export OMP_NUM_THREADS=64
# module load gsl
# Example usage: python xiruncz.py --type ELG_HIP
import subprocess
import sys
import argparse
import os
#sys.path.append('../py')
#import LSS.mkCat_singletile.xitools as xt
#import LSS.SV3.xitools as xt
parser = argparse.ArgumentParser()
parser.add_argument("--type", help="tracer type to be selected")
parser.add_argument("--basedir", help="base directory for output, default is desi catalog directory",default='/global/cfs/cdirs/desi/survey/catalogs')
parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
parser.add_argument("--verspec",help="version for redshifts",default='everest')
parser.add_argument("--survey",help="e.g., SV3 or main",default='SV3')
parser.add_argument("--nran",help="number of random files to combine together (1-18 available)",default=10)
args = parser.parse_args()
type = args.type
basedir = args.basedir
version = args.version
specrel = args.verspec
survey = args.survey
nran = int(args.nran)
if survey == 'SV3':
import LSS.SV3.xitools as xt
if survey == 'main':
import LSS.main.xitools as xt
lssdir = basedir+'/'+survey+'/LSS/'+specrel+'/LSScats/'
#dirout = svdir+'LSScats/'+version+'/'
zmask = ['']
minn = 0
subt = None
if type == 'LRGAlltiles' or type == 'LRGAlltiles_main':
zl = [0.32,0.6,0.8,1.05,1.3]
#minn = 2
#zmin=0.32
#zmax=1.05
if type == 'LRG':
zl = [0.4,0.6,0.8,1.1]
# minn = 5
#zmin=0.32
#zmax=1.05
if type == 'LRG_OPT':
subt = type
zmin=0.6
zmax=1.
type = 'LRG'
if type == 'LRG_IR':
subt = type
zmin=0.6
zmax=1.
type = 'LRG'
if type[:3] == 'ELG':# or type == 'ELG_HIP':
#minn = 5
zl = [0.8,1.1,1.5]
#zmask = ['','_zmask']
#zmin = 0.8
#zmax = 1.6
#if type == 'ELG_HIP':
# zmin = 0.8
# zmax = 1.6
if type == 'ELG_HIP16':
minn = 5
zl = [1,1.6]
type = 'ELG_HIP'
if type == 'ELG16':
minn = 5
zl = [1,1.6]
type = 'ELG'
if type == 'ELGlz':
zmin = 0.6
zmax = 0.8
type = 'ELG'
if type == 'ELGmz':
zmin = 0.8
zmax = 1.1
type = 'ELG'
if type == 'ELGhz':
zmin = 1.1
zmax = 1.6
type = 'ELG'
if type == 'ELGmhz':
zmin = 0.6
zmax = 1.497
type = 'ELG'
if type == 'ELGhz497':
zmin = 1.1
zmax = 1.497
type = 'ELG'
if type == 'QSO':
zl = [0.8,1.1,1.5,2.1]
#zmin = 1.
#zmax = 2.1
if type == 'QSOhiz':
zmin = 1.6
zmax = 2.1
type = 'QSO'
if type == 'QSOlya':
#zmin = 2.1
#zmax = 3.5
zl = [2.1,3.5]
type = 'QSO'
if type == 'QSO_RF_4PASS':
subt = type
zmin = 1.6
zmax = 2.1
type = 'QSO'
if type == 'ELG_FDR_GFIB':
subt = type
zmin = 1.1
zmax = 1.6
type = 'ELG'
if type[:3] == 'BGS':
#minn = 2
zl = [0.1,0.3,0.5]
#zmin = 0.1
#zmax = 0.5
if type == 'BGS_hiz':
zmin = 0.3
zmax = 0.5
type = 'BGS_ANY'
ranwt1=False
regl = ['_N','_S']
if survey == 'main':
regl = ['_DN','_DS','_N','_S']
for i in range(0,len(zl)):
if i == len(zl)-1:
zmin=zl[0]
zmax=zl[-1]
else:
zmin = zl[i]
zmax = zl[i+1]
print(zmin,zmax)
for zma in zmask:
for reg in regl:
xt.prep4czxi(type,zmin,zmax,nran=nran,indir=lssdir,ver=version,minn=minn,reg=zma+reg,outdir=os.environ['CSCRATCH']+'/cz/',ranwt1=ranwt1,subt=subt)
subprocess.run(['chmod','+x','czpc.sh'])
subprocess.run('./czpc.sh')
fa = ''
if ranwt1:
fa = 'ranwt1'
if subt is not None:
fa += subt
xt.calcxi_dataCZ(type,zmin,zmax,minn=minn,reg=zma+reg,ver=version,fa=fa)
xt.prep4czxi(type,zmin,zmax,nran=nran,indir=lssdir,ver=version,minn=minn,reg=zma,outdir=os.environ['CSCRATCH']+'/cz/',ranwt1=ranwt1,subt=subt)
subprocess.run(['chmod','+x','czpc.sh'])
subprocess.run('./czpc.sh')
fa = ''
if ranwt1:
fa = 'ranwt1'
if subt is not None:
fa += subt
xt.calcxi_dataCZ(type,zmin,zmax,minn=minn,ver=version,fa=fa,reg=zma)
|