content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
import sys

sys.path.append("./stqft")
sys.path.append("./qcnn")

import os

# Activate the cuda env.
# BUG FIX: os.environ assignment does not expand shell variables, so the old
# value stored the literal text "$LD_LIBRARY_PATH".  Extend the real current
# value instead.
# NOTE(review): mutating LD_LIBRARY_PATH at runtime only affects child
# processes, not libraries already loaded into this interpreter -- confirm this
# happens early enough for the CUDA-backed imports below.
_cuda_dirs = (
    "/usr/local/cuda/lib64/:/usr/lib64:/usr/local/cuda/extras/CUPTI/lib64:"
    "/usr/local/cuda-11.2/lib64:/usr/local/cuda/targets/x86_64-linux/lib/"
)
os.environ["LD_LIBRARY_PATH"] = os.environ.get("LD_LIBRARY_PATH", "") + ":" + _cuda_dirs

import time
import multiprocessing
import glob

import numpy as np

# Storage locations for the individual pipeline stages.
datasetPath = "/storage/mstrobl/dataset"
featurePath = "/storage/mstrobl/features"
checkpointsPath = "/storage/mstrobl/checkpoints"  # was assigned twice; deduplicated
modelsPath = "/storage/mstrobl/models"
quantumPath = "/storage/mstrobl/dataQuantum"
waveformPath = "/storage/mstrobl/waveforms"
exportPath = "/storage/mstrobl/versioning"

TOPIC = "PrepGenTrain"

# Training hyper-parameters.
batchSize = 28
kernelSize = 2
epochs = 40
portion = 1

PoolSize = int(multiprocessing.cpu_count() * 0.6)  # be gentle..
# PoolSize = 1 # be gentle..


def _banner(message):
    """Print a stage banner followed by *message*, matching the original output."""
    print(f"\n\n\n-----------------------\n\n\n")
    print(message)
    print(f"\n\n\n-----------------------\n\n\n")


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--waveform", default=1, help="Generate Waveforms")
    parser.add_argument("--quantum", default=1, help="Generate Quantum Data")
    # BUG FIX: `default=1` combined with `action='store_true'` made args.train
    # truthy in every case, so training could never be disabled.  `nargs='?'`
    # keeps the bare `--train` flag working while also allowing `--train 0`.
    parser.add_argument("--train", default=1, nargs='?', const=1, help="Fit the model")
    parser.add_argument("--checkTree", default=1, help="Checks if the working tree is dirty")
    args = parser.parse_args()

    from stqft.frontend import export

    if int(args.checkTree) == 1:
        export.checkWorkingTree(exportPath)

    _banner(f"Train Time @{time.time()}")

    multiprocessing.set_start_method('spawn')
    print(f"Running {PoolSize} processes")

    datasetFiles = glob.glob(datasetPath + "/**/*.wav", recursive=True)
    print(f"Found {len(datasetFiles)} files in the dataset")

    exp = export(topic=TOPIC, identifier="dataset", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Dataset {len(datasetFiles)} in {datasetPath}")
    exp.setData(export.GENERICDATA, datasetFiles)
    exp.doExport()

    _banner(f"Generating Waveforms @{time.time()}")

    from generateFeatures import gen_features, gen_quantum, reportSettings, samplingRate
    from qcnn.small_qsr import labels

    if int(args.waveform) == 1:
        x_train, x_valid, y_train, y_valid = gen_features(
            labels, datasetPath, featurePath, PoolSize,
            waveformPath=waveformPath, portion=portion)
    else:
        print("Loading from disk...")
        x_train = np.load(f"{featurePath}/x_train_speech.npy")
        x_valid = np.load(f"{featurePath}/x_valid_speech.npy")
        y_train = np.load(f"{featurePath}/y_train_speech.npy")
        y_valid = np.load(f"{featurePath}/y_valid_speech.npy")

    exp = export(topic=TOPIC, identifier="waveformData", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Waveforms generated (T)/ loaded (F): {args.waveform}; Labels used: {labels}; FeaturePath: {featurePath}; PoolSize: {PoolSize}; WaveformPath: {waveformPath}; Portioning: {portion}, SamplingRate: {samplingRate}, {reportSettings()}")
    exp.setData(export.GENERICDATA, {"x_train": x_train, "x_valid": x_valid, "y_train": y_train, "y_valid": y_valid})
    exp.doExport()

    _banner(f"Generating Quantum Data @{time.time()}")

    quantumMode = int(args.quantum)
    if quantumMode == -2:
        # disable quanv and pix chan map: pass features through untouched
        q_train = x_train
        q_valid = x_valid
    elif quantumMode == 1:
        # enable quanv
        q_train, q_valid = gen_quantum(x_train, x_valid, kernelSize, output=quantumPath, poolSize=PoolSize)
    elif quantumMode == -1:
        # pix chan map only (quanv disabled)
        q_train, q_valid = gen_quantum(x_train, x_valid, kernelSize, output=quantumPath, poolSize=PoolSize, quanv=False)
    else:
        # load from disk
        print("Loading from disk...")
        q_train = np.load(f"{quantumPath}/quanv_train.npy")
        q_valid = np.load(f"{quantumPath}/quanv_valid.npy")

    exp = export(topic=TOPIC, identifier="quantumData", dataDir=exportPath)
    exp.setData(export.DESCRIPTION, f"Quantum data generated (T)/ loaded (F): {args.quantum}; FeaturePath: {quantumPath}; PoolSize: {PoolSize};")
    exp.setData(export.GENERICDATA, {"q_train": q_train, "q_valid": q_valid})
    exp.doExport()

    _banner(f"Starting Training @{time.time()}")

    from fitModel import fit_model

    if int(args.train) == 1:
        # ablation when quanv was bypassed entirely or produced a single channel
        useAblation = quantumMode == -2 or q_train.shape[3] == 1
        if useAblation:
            print("using ablation")
        # pass quanv data for training and validation
        model, history = fit_model(q_train, y_train, q_valid, y_valid, checkpointsPath,
                                   epochs=epochs, batchSize=batchSize, ablation=useAblation)
        model.save(f"{modelsPath}/model_{time.time()}")

        # BUG FIX: this export referenced `history`, which is undefined when
        # training is skipped (NameError); it now runs only after a fit.
        exp = export(topic=TOPIC, identifier="model", dataDir=exportPath)
        exp.setData(export.DESCRIPTION, f"Model trained (T)/ loaded (F): {args.train}; CheckpointsPath: {checkpointsPath}; ModelsPath: {modelsPath}")
        exp.setData(export.GENERICDATA, {"history_acc": history.history['accuracy'], "history_val_acc": history.history['val_accuracy'], "history_loss": history.history['loss'], "history_val_loss": history.history['val_loss']})
        exp.doExport()
    else:
        print("Training disabled")
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class EntityGroupControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_entities_to_entity_group_using_post(self, entity_group_id, **kwargs):  # noqa: E501
    """Add entities to the group (addEntitiesToEntityGroup) # noqa: E501

    Adds entities to the specified entity group.  Requires 'TENANT_ADMIN' or
    'CUSTOMER_USER' authority and 'ADD_TO_GROUP' permission on the group.
    Synchronous by default; pass async_req=True for an async request.

    >>> thread = api.add_entities_to_entity_group_using_post(entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str entity_group_id: A string value representing the Entity Group Id. (required)
    :param list[str] body: ids of the entities to add
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # With _return_http_data_only set, the helper already yields the bare
    # payload (or the request thread when async_req is given), so the
    # sync/async branches collapse into a single call.
    return self.add_entities_to_entity_group_using_post_with_http_info(entity_group_id, **kwargs)  # noqa: E501
def add_entities_to_entity_group_using_post_with_http_info(self, entity_group_id, **kwargs):  # noqa: E501
    """Add entities to the group (addEntitiesToEntityGroup) # noqa: E501

    Low-level variant of add_entities_to_entity_group_using_post that exposes
    the full HTTP response.  Synchronous by default; pass async_req=True for
    an async request.

    >>> thread = api.add_entities_to_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str entity_group_id: A string value representing the Entity Group Id. (required)
    :param list[str] body: ids of the entities to add
    :return: None, or the request thread when called asynchronously.
    """
    # Reject any keyword argument the endpoint does not understand.
    allowed = ['entity_group_id', 'body',
               'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']
    params = {'entity_group_id': entity_group_id}
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_entities_to_entity_group_using_post" % name
            )
        params[name] = value

    # Required path parameter must be present and non-None.
    if params.get('entity_group_id') is None:
        raise ValueError("Missing the required parameter `entity_group_id` when calling `add_entities_to_entity_group_using_post`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/entityGroup/{entityGroupId}/addEntities', 'POST',
        {'entityGroupId': params['entity_group_id']},
        [],
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['X-Authorization'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def assign_entity_group_to_edge_using_post(self, edge_id, group_type, entity_group_id, **kwargs):  # noqa: E501
    """Assign entity group to edge (assignEntityGroupToEdge) # noqa: E501

    Queues the assignment of an existing entity group to an Edge instance; the
    edge receives the group (and then its entities) asynchronously once it is
    connected.  Requires 'TENANT_ADMIN' or 'CUSTOMER_USER' authority and
    'WRITE' permission.  Synchronous by default; pass async_req=True for an
    async request.

    >>> thread = api.assign_entity_group_to_edge_using_post(edge_id, group_type, entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. (required)
    :param str group_type: EntityGroup type (required)
    :param str entity_group_id: A string value representing the Entity Group Id. (required)
    :return: EntityGroup, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just forward to the *_with_http_info helper.
    return self.assign_entity_group_to_edge_using_post_with_http_info(edge_id, group_type, entity_group_id, **kwargs)  # noqa: E501
def assign_entity_group_to_edge_using_post_with_http_info(self, edge_id, group_type, entity_group_id, **kwargs):  # noqa: E501
    """Assign entity group to edge (assignEntityGroupToEdge) # noqa: E501

    Low-level variant of assign_entity_group_to_edge_using_post that exposes
    the full HTTP response.  Synchronous by default; pass async_req=True for
    an async request.

    >>> thread = api.assign_entity_group_to_edge_using_post_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. (required)
    :param str group_type: EntityGroup type (required)
    :param str entity_group_id: A string value representing the Entity Group Id. (required)
    :return: EntityGroup, or the request thread when called asynchronously.
    """
    allowed = ['edge_id', 'group_type', 'entity_group_id',
               'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']
    params = {'edge_id': edge_id,
              'group_type': group_type,
              'entity_group_id': entity_group_id}
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method assign_entity_group_to_edge_using_post" % name
            )
        params[name] = value

    # All three path parameters are mandatory; validate in declaration order.
    if params.get('edge_id') is None:
        raise ValueError("Missing the required parameter `edge_id` when calling `assign_entity_group_to_edge_using_post`")  # noqa: E501
    if params.get('group_type') is None:
        raise ValueError("Missing the required parameter `group_type` when calling `assign_entity_group_to_edge_using_post`")  # noqa: E501
    if params.get('entity_group_id') is None:
        raise ValueError("Missing the required parameter `entity_group_id` when calling `assign_entity_group_to_edge_using_post`")  # noqa: E501

    path_params = {'edgeId': params['edge_id'],
                   'groupType': params['group_type'],
                   'entityGroupId': params['entity_group_id']}
    header_params = {'Accept': self.api_client.select_header_accept(['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/edge/{edgeId}/entityGroup/{entityGroupId}/{groupType}', 'POST',
        path_params,
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='EntityGroup',  # noqa: E501
        auth_settings=['X-Authorization'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_entity_group_using_delete(self, entity_group_id, **kwargs):  # noqa: E501
    """Delete Entity Group (deleteEntityGroup) # noqa: E501

    Deletes the entity group without deleting its member entities (they remain
    in the reserved group 'All').  A non-existing id causes an error.
    Requires 'TENANT_ADMIN' or 'CUSTOMER_USER' authority and 'DELETE'
    permission on the group.  Synchronous by default; pass async_req=True for
    an async request.

    >>> thread = api.delete_entity_group_using_delete(entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str entity_group_id: A string value representing the Entity Group Id. (required)
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just forward to the *_with_http_info helper.
    return self.delete_entity_group_using_delete_with_http_info(entity_group_id, **kwargs)  # noqa: E501
def delete_entity_group_using_delete_with_http_info(self, entity_group_id, **kwargs):  # noqa: E501
    """Delete Entity Group (deleteEntityGroup) # noqa: E501

    Low-level variant of delete_entity_group_using_delete that exposes the
    full HTTP response.  Synchronous by default; pass async_req=True for an
    async request.

    >>> thread = api.delete_entity_group_using_delete_with_http_info(entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str entity_group_id: A string value representing the Entity Group Id. (required)
    :return: None, or the request thread when called asynchronously.
    """
    allowed = ['entity_group_id',
               'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']
    params = {'entity_group_id': entity_group_id}
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_entity_group_using_delete" % name
            )
        params[name] = value

    if params.get('entity_group_id') is None:
        raise ValueError("Missing the required parameter `entity_group_id` when calling `delete_entity_group_using_delete`")  # noqa: E501

    header_params = {'Accept': self.api_client.select_header_accept(['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/entityGroup/{entityGroupId}', 'DELETE',
        {'entityGroupId': params['entity_group_id']},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['X-Authorization'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_all_edge_entity_groups_using_get(self, edge_id, group_type, **kwargs):  # noqa: E501
    """Get All Edge Entity Groups by entity type (getAllEdgeEntityGroups) # noqa: E501

    Fetches all Entity Group Info objects of the given type assigned to the
    given Edge.  Entity Group Info extends Entity Group with 'ownerIds'.
    Requires 'TENANT_ADMIN' or 'CUSTOMER_USER' authority and 'READ'
    permission.  Synchronous by default; pass async_req=True for an async
    request.

    >>> thread = api.get_all_edge_entity_groups_using_get(edge_id, group_type, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. (required)
    :param str group_type: EntityGroup type (required)
    :return: list[EntityGroupInfo], or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just forward to the *_with_http_info helper.
    return self.get_all_edge_entity_groups_using_get_with_http_info(edge_id, group_type, **kwargs)  # noqa: E501
def get_all_edge_entity_groups_using_get_with_http_info(self, edge_id, group_type, **kwargs):  # noqa: E501
    """Get All Edge Entity Groups by entity type (getAllEdgeEntityGroups) # noqa: E501

    Low-level variant of get_all_edge_entity_groups_using_get that exposes the
    full HTTP response.  Synchronous by default; pass async_req=True for an
    async request.

    >>> thread = api.get_all_edge_entity_groups_using_get_with_http_info(edge_id, group_type, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. (required)
    :param str group_type: EntityGroup type (required)
    :return: list[EntityGroupInfo], or the request thread when called asynchronously.
    """
    allowed = ['edge_id', 'group_type',
               'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']
    params = {'edge_id': edge_id, 'group_type': group_type}
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_edge_entity_groups_using_get" % name
            )
        params[name] = value

    # Both path parameters are mandatory; validate in declaration order.
    if params.get('edge_id') is None:
        raise ValueError("Missing the required parameter `edge_id` when calling `get_all_edge_entity_groups_using_get`")  # noqa: E501
    if params.get('group_type') is None:
        raise ValueError("Missing the required parameter `group_type` when calling `get_all_edge_entity_groups_using_get`")  # noqa: E501

    path_params = {'edgeId': params['edge_id'],
                   'groupType': params['group_type']}
    header_params = {'Accept': self.api_client.select_header_accept(['application/json'])}  # noqa: E501

    return self.api_client.call_api(
        '/api/allEntityGroups/edge/{edgeId}/{groupType}', 'GET',
        path_params,
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[EntityGroupInfo]',  # noqa: E501
        auth_settings=['X-Authorization'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_edge_entity_groups_using_get(self, edge_id, group_type, page_size, page, **kwargs):  # noqa: E501
    """Get Edge Entity Groups by entity type (getEdgeEntityGroups) # noqa: E501

    Returns one page of Entity Group Info objects of the given type assigned
    to the given Edge, wrapped in a PageData object for pagination.  Requires
    'TENANT_ADMIN' or 'CUSTOMER_USER' authority and 'READ' permission.
    Synchronous by default; pass async_req=True for an async request.

    >>> thread = api.get_edge_entity_groups_using_get(edge_id, group_type, page_size, page, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. (required)
    :param str group_type: EntityGroup type (required)
    :param int page_size: Maximum amount of entities in a one page (required)
    :param int page: Sequence number of page starting from 0 (required)
    :param str sort_property: Property of entity to sort by
    :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
    :return: PageDataEntityGroupInfo, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just forward to the *_with_http_info helper.
    return self.get_edge_entity_groups_using_get_with_http_info(edge_id, group_type, page_size, page, **kwargs)  # noqa: E501
def get_edge_entity_groups_using_get_with_http_info(self, edge_id, group_type, page_size, page, **kwargs):  # noqa: E501
    """Get Edge Entity Groups by entity type (getEdgeEntityGroups) # noqa: E501

    Low-level variant of get_edge_entity_groups_using_get that exposes the
    full HTTP response.  Synchronous by default; pass async_req=True for an
    async request.

    >>> thread = api.get_edge_entity_groups_using_get_with_http_info(edge_id, group_type, page_size, page, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. (required)
    :param str group_type: EntityGroup type (required)
    :param int page_size: Maximum amount of entities in a one page (required)
    :param int page: Sequence number of page starting from 0 (required)
    :param str sort_property: Property of entity to sort by
    :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
    :return: PageDataEntityGroupInfo, or the request thread when called asynchronously.
    """
    allowed = ['edge_id', 'group_type', 'page_size', 'page',
               'sort_property', 'sort_order',
               'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']
    params = {'edge_id': edge_id, 'group_type': group_type,
              'page_size': page_size, 'page': page}
    for name, value in six.iteritems(kwargs):
        if name not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_edge_entity_groups_using_get" % name
            )
        params[name] = value

    # All four positional parameters are mandatory; validate in order.
    if params.get('edge_id') is None:
        raise ValueError("Missing the required parameter `edge_id` when calling `get_edge_entity_groups_using_get`")  # noqa: E501
    if params.get('group_type') is None:
        raise ValueError("Missing the required parameter `group_type` when calling `get_edge_entity_groups_using_get`")  # noqa: E501
    if params.get('page_size') is None:
        raise ValueError("Missing the required parameter `page_size` when calling `get_edge_entity_groups_using_get`")  # noqa: E501
    if params.get('page') is None:
        raise ValueError("Missing the required parameter `page` when calling `get_edge_entity_groups_using_get`")  # noqa: E501

    path_params = {'edgeId': params['edge_id'],
                   'groupType': params['group_type']}

    # Pagination/sort options travel as real query parameters.
    query_params = [('pageSize', params['page_size']),
                    ('page', params['page'])]
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501

    header_params = {'Accept': self.api_client.select_header_accept(['application/json'])}  # noqa: E501

    # BUG FIX: the generated path carried an RFC 6570 query-expansion suffix
    # '{?page,pageSize,sortOrder,sortProperty}' that the swagger python client
    # never expands, so it was appended to the URL literally.  The query
    # string is already built from `query_params`; drop the suffix.
    return self.api_client.call_api(
        '/api/entityGroups/edge/{edgeId}/{groupType}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='PageDataEntityGroupInfo',  # noqa: E501
        auth_settings=['X-Authorization'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_entities_using_get(self, entity_group_id, page_size, page, **kwargs): # noqa: E501
"""Get Group Entities (getEntities) # noqa: E501
Returns a page of Short Entity View objects that belongs to specified Entity Group Id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entities_using_get(entity_group_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataShortEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entities_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_entities_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501
return data
def get_entities_using_get_with_http_info(self, entity_group_id, page_size, page, **kwargs): # noqa: E501
"""Get Group Entities (getEntities) # noqa: E501
Returns a page of Short Entity View objects that belongs to specified Entity Group Id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entities_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataShortEntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entities_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `get_entities_using_get`") # noqa: E501
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_entities_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_entities_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/entities{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataShortEntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_group_all_by_owner_and_type_using_get(self, owner_type, owner_id, group_type, **kwargs): # noqa: E501
"""Get special group All by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch reserved group 'All' based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_all_by_owner_and_type_using_get(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_group_all_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, **kwargs) # noqa: E501
else:
(data) = self.get_entity_group_all_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, **kwargs) # noqa: E501
return data
def get_entity_group_all_by_owner_and_type_using_get_with_http_info(self, owner_type, owner_id, group_type, **kwargs): # noqa: E501
"""Get special group All by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch reserved group 'All' based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_all_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner_type', 'owner_id', 'group_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_group_all_by_owner_and_type_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner_type' is set
if ('owner_type' not in params or
params['owner_type'] is None):
raise ValueError("Missing the required parameter `owner_type` when calling `get_entity_group_all_by_owner_and_type_using_get`") # noqa: E501
# verify the required parameter 'owner_id' is set
if ('owner_id' not in params or
params['owner_id'] is None):
raise ValueError("Missing the required parameter `owner_id` when calling `get_entity_group_all_by_owner_and_type_using_get`") # noqa: E501
# verify the required parameter 'group_type' is set
if ('group_type' not in params or
params['group_type'] is None):
raise ValueError("Missing the required parameter `group_type` when calling `get_entity_group_all_by_owner_and_type_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner_type' in params:
path_params['ownerType'] = params['owner_type'] # noqa: E501
if 'owner_id' in params:
path_params['ownerId'] = params['owner_id'] # noqa: E501
if 'group_type' in params:
path_params['groupType'] = params['group_type'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/all/{ownerType}/{ownerId}/{groupType}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityGroupInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_group_by_id_using_get(self, entity_group_id, **kwargs): # noqa: E501
"""Get Entity Group Info (getEntityGroupById) # noqa: E501
Fetch the Entity Group object based on the provided Entity Group Id. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_id_using_get(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_group_by_id_using_get_with_http_info(entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.get_entity_group_by_id_using_get_with_http_info(entity_group_id, **kwargs) # noqa: E501
return data
def get_entity_group_by_id_using_get_with_http_info(self, entity_group_id, **kwargs): # noqa: E501
"""Get Entity Group Info (getEntityGroupById) # noqa: E501
Fetch the Entity Group object based on the provided Entity Group Id. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_id_using_get_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_group_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `get_entity_group_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityGroupInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_group_by_owner_and_name_and_type_using_get(self, owner_type, owner_id, group_type, group_name, **kwargs): # noqa: E501
"""Get Entity Group by owner, type and name (getEntityGroupByOwnerAndNameAndType) # noqa: E501
Fetch the Entity Group object based on the provided Entity Group Id. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_owner_and_name_and_type_using_get(owner_type, owner_id, group_type, group_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:param str group_name: Entity Group name (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_group_by_owner_and_name_and_type_using_get_with_http_info(owner_type, owner_id, group_type, group_name, **kwargs) # noqa: E501
else:
(data) = self.get_entity_group_by_owner_and_name_and_type_using_get_with_http_info(owner_type, owner_id, group_type, group_name, **kwargs) # noqa: E501
return data
def get_entity_group_by_owner_and_name_and_type_using_get_with_http_info(self, owner_type, owner_id, group_type, group_name, **kwargs): # noqa: E501
"""Get Entity Group by owner, type and name (getEntityGroupByOwnerAndNameAndType) # noqa: E501
Fetch the Entity Group object based on the provided Entity Group Id. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_owner_and_name_and_type_using_get_with_http_info(owner_type, owner_id, group_type, group_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:param str group_name: Entity Group name (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner_type', 'owner_id', 'group_type', 'group_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_group_by_owner_and_name_and_type_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner_type' is set
if ('owner_type' not in params or
params['owner_type'] is None):
raise ValueError("Missing the required parameter `owner_type` when calling `get_entity_group_by_owner_and_name_and_type_using_get`") # noqa: E501
# verify the required parameter 'owner_id' is set
if ('owner_id' not in params or
params['owner_id'] is None):
raise ValueError("Missing the required parameter `owner_id` when calling `get_entity_group_by_owner_and_name_and_type_using_get`") # noqa: E501
# verify the required parameter 'group_type' is set
if ('group_type' not in params or
params['group_type'] is None):
raise ValueError("Missing the required parameter `group_type` when calling `get_entity_group_by_owner_and_name_and_type_using_get`") # noqa: E501
# verify the required parameter 'group_name' is set
if ('group_name' not in params or
params['group_name'] is None):
raise ValueError("Missing the required parameter `group_name` when calling `get_entity_group_by_owner_and_name_and_type_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner_type' in params:
path_params['ownerType'] = params['owner_type'] # noqa: E501
if 'owner_id' in params:
path_params['ownerId'] = params['owner_id'] # noqa: E501
if 'group_type' in params:
path_params['groupType'] = params['group_type'] # noqa: E501
if 'group_name' in params:
path_params['groupName'] = params['group_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{ownerType}/{ownerId}/{groupType}/{groupName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityGroupInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_groups_by_ids_using_get(self, entity_group_ids, **kwargs): # noqa: E501
"""Get Entity Groups by Ids (getDevicesByIds) # noqa: E501
Requested devices must be owned by tenant or assigned to customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_ids_using_get(entity_group_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_ids: A list of group ids, separated by comma ',' (required)
:return: list[EntityGroup]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_groups_by_ids_using_get_with_http_info(entity_group_ids, **kwargs) # noqa: E501
else:
(data) = self.get_entity_groups_by_ids_using_get_with_http_info(entity_group_ids, **kwargs) # noqa: E501
return data
def get_entity_groups_by_ids_using_get_with_http_info(self, entity_group_ids, **kwargs): # noqa: E501
"""Get Entity Groups by Ids (getDevicesByIds) # noqa: E501
Requested devices must be owned by tenant or assigned to customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_ids_using_get_with_http_info(entity_group_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_ids: A list of group ids, separated by comma ',' (required)
:return: list[EntityGroup]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_ids'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_groups_by_ids_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_ids' is set
if ('entity_group_ids' not in params or
params['entity_group_ids'] is None):
raise ValueError("Missing the required parameter `entity_group_ids` when calling `get_entity_groups_by_ids_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'entity_group_ids' in params:
query_params.append(('entityGroupIds', params['entity_group_ids'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroups{?entityGroupIds}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityGroup]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_groups_by_owner_and_type_using_get(self, owner_type, owner_id, group_type, **kwargs): # noqa: E501
"""Get Entity Groups by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_owner_and_type_using_get(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_groups_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, **kwargs) # noqa: E501
else:
(data) = self.get_entity_groups_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, **kwargs) # noqa: E501
return data
def get_entity_groups_by_owner_and_type_using_get_with_http_info(self, owner_type, owner_id, group_type, **kwargs): # noqa: E501
"""Get Entity Groups by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner_type', 'owner_id', 'group_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_groups_by_owner_and_type_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner_type' is set
if ('owner_type' not in params or
params['owner_type'] is None):
raise ValueError("Missing the required parameter `owner_type` when calling `get_entity_groups_by_owner_and_type_using_get`") # noqa: E501
# verify the required parameter 'owner_id' is set
if ('owner_id' not in params or
params['owner_id'] is None):
raise ValueError("Missing the required parameter `owner_id` when calling `get_entity_groups_by_owner_and_type_using_get`") # noqa: E501
# verify the required parameter 'group_type' is set
if ('group_type' not in params or
params['group_type'] is None):
raise ValueError("Missing the required parameter `group_type` when calling `get_entity_groups_by_owner_and_type_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner_type' in params:
path_params['ownerType'] = params['owner_type'] # noqa: E501
if 'owner_id' in params:
path_params['ownerId'] = params['owner_id'] # noqa: E501
if 'group_type' in params:
path_params['groupType'] = params['group_type'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroups/{ownerType}/{ownerId}/{groupType}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityGroupInfo]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_groups_by_type_using_get(self, group_type, **kwargs): # noqa: E501
"""Get Entity Groups by entity type (getEntityGroupsByType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_type_using_get(group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_groups_by_type_using_get_with_http_info(group_type, **kwargs) # noqa: E501
else:
(data) = self.get_entity_groups_by_type_using_get_with_http_info(group_type, **kwargs) # noqa: E501
return data
def get_entity_groups_by_type_using_get_with_http_info(self, group_type, **kwargs): # noqa: E501
"""Get Entity Groups by entity type (getEntityGroupsByType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_type_using_get_with_http_info(group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['group_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_groups_by_type_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'group_type' is set
if ('group_type' not in params or
params['group_type'] is None):
raise ValueError("Missing the required parameter `group_type` when calling `get_entity_groups_by_type_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'group_type' in params:
path_params['groupType'] = params['group_type'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroups/{groupType}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityGroupInfo]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entity_groups_for_entity_using_get(self, entity_type, entity_id, **kwargs): # noqa: E501
"""Get Entity Groups by Entity Id (getEntityGroupsForEntity) # noqa: E501
Returns a list of groups that contain the specified Entity Id. For example, all device groups that contain specific device. The list always contain at least one element - special group 'All'.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_for_entity_using_get(entity_type, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_type: Entity Group type (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: list[EntityGroupId]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entity_groups_for_entity_using_get_with_http_info(entity_type, entity_id, **kwargs) # noqa: E501
else:
(data) = self.get_entity_groups_for_entity_using_get_with_http_info(entity_type, entity_id, **kwargs) # noqa: E501
return data
def get_entity_groups_for_entity_using_get_with_http_info(self, entity_type, entity_id, **kwargs): # noqa: E501
"""Get Entity Groups by Entity Id (getEntityGroupsForEntity) # noqa: E501
Returns a list of groups that contain the specified Entity Id. For example, all device groups that contain specific device. The list always contain at least one element - special group 'All'.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_for_entity_using_get_with_http_info(entity_type, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_type: Entity Group type (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: list[EntityGroupId]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_type', 'entity_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_entity_groups_for_entity_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_type' is set
if ('entity_type' not in params or
params['entity_type'] is None):
raise ValueError("Missing the required parameter `entity_type` when calling `get_entity_groups_for_entity_using_get`") # noqa: E501
# verify the required parameter 'entity_id' is set
if ('entity_id' not in params or
params['entity_id'] is None):
raise ValueError("Missing the required parameter `entity_id` when calling `get_entity_groups_for_entity_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_type' in params:
path_params['entityType'] = params['entity_type'] # noqa: E501
if 'entity_id' in params:
path_params['entityId'] = params['entity_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroups/{entityType}/{entityId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityGroupId]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_group_entity_using_get(self, entity_group_id, entity_id, **kwargs): # noqa: E501
"""Get Group Entity (getGroupEntity) # noqa: E501
Fetch the Short Entity View object based on the group and entity id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_entity_using_get(entity_group_id, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: ShortEntityView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_group_entity_using_get_with_http_info(entity_group_id, entity_id, **kwargs) # noqa: E501
else:
(data) = self.get_group_entity_using_get_with_http_info(entity_group_id, entity_id, **kwargs) # noqa: E501
return data
def get_group_entity_using_get_with_http_info(self, entity_group_id, entity_id, **kwargs): # noqa: E501
"""Get Group Entity (getGroupEntity) # noqa: E501
Fetch the Short Entity View object based on the group and entity id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_entity_using_get_with_http_info(entity_group_id, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: ShortEntityView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id', 'entity_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_group_entity_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `get_group_entity_using_get`") # noqa: E501
# verify the required parameter 'entity_id' is set
if ('entity_id' not in params or
params['entity_id'] is None):
raise ValueError("Missing the required parameter `entity_id` when calling `get_group_entity_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
if 'entity_id' in params:
path_params['entityId'] = params['entity_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/{entityId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ShortEntityView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_owners_using_get(self, page_size, page, **kwargs): # noqa: E501
"""Get Owners (getOwners) # noqa: E501
Provides a rage view of Customers that the current user has READ access to. If the current user is Tenant administrator, the result set also contains the tenant. The call is designed for the UI auto-complete component to show tenant and all possible Customers that the user may select to change the owner of the particular entity or entity group. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_owners_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataContactBasedobject
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_owners_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_owners_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_owners_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""Get Owners (getOwners) # noqa: E501
Provides a rage view of Customers that the current user has READ access to. If the current user is Tenant administrator, the result set also contains the tenant. The call is designed for the UI auto-complete component to show tenant and all possible Customers that the user may select to change the owner of the particular entity or entity group. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_owners_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataContactBasedobject
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_owners_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_owners_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_owners_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/owners{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataContactBasedobject', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def make_entity_group_private_using_post(self, entity_group_id, **kwargs): # noqa: E501
"""Make Entity Group Private (makeEntityGroupPrivate) # noqa: E501
Make the entity group not available for non authorized users. Every group is private by default. This call is useful to hide the group that was previously made public. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_private_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.make_entity_group_private_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.make_entity_group_private_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
return data
def make_entity_group_private_using_post_with_http_info(self, entity_group_id, **kwargs): # noqa: E501
"""Make Entity Group Private (makeEntityGroupPrivate) # noqa: E501
Make the entity group not available for non authorized users. Every group is private by default. This call is useful to hide the group that was previously made public. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_private_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method make_entity_group_private_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `make_entity_group_private_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/makePrivate', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def make_entity_group_public_using_post(self, entity_group_id, **kwargs): # noqa: E501
"""Make Entity Group Publicly available (makeEntityGroupPublic) # noqa: E501
Make the entity group available for non authorized users. Useful for public dashboards that will be embedded into the public websites. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_public_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.make_entity_group_public_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.make_entity_group_public_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
return data
def make_entity_group_public_using_post_with_http_info(self, entity_group_id, **kwargs): # noqa: E501
"""Make Entity Group Publicly available (makeEntityGroupPublic) # noqa: E501
Make the entity group available for non authorized users. Useful for public dashboards that will be embedded into the public websites. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_public_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method make_entity_group_public_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `make_entity_group_public_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/makePublic', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_entities_from_entity_group_using_post(self, entity_group_id, **kwargs): # noqa: E501
"""Remove entities from the group (removeEntitiesFromEntityGroup) # noqa: E501
Removes entities from the specified entity group. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'REMOVE_FROM_GROUP' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_entities_from_entity_group_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param list[str] body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_entities_from_entity_group_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.remove_entities_from_entity_group_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
return data
def remove_entities_from_entity_group_using_post_with_http_info(self, entity_group_id, **kwargs): # noqa: E501
"""Remove entities from the group (removeEntitiesFromEntityGroup) # noqa: E501
Removes entities from the specified entity group. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'REMOVE_FROM_GROUP' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_entities_from_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param list[str] body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_entities_from_entity_group_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `remove_entities_from_entity_group_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/deleteEntities', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_entity_group_using_post(self, **kwargs): # noqa: E501
"""Create Or Update Entity Group (saveEntityGroup) # noqa: E501
Create or update the Entity Group. When creating Entity Group, platform generates Entity Group Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Entity Group Id will be present in the response. Specify existing Entity Group Id to update the group. Referencing non-existing Entity Group Id will cause 'Not Found' error. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_group_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityGroup body:
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_entity_group_using_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.save_entity_group_using_post_with_http_info(**kwargs) # noqa: E501
return data
def save_entity_group_using_post_with_http_info(self, **kwargs): # noqa: E501
"""Create Or Update Entity Group (saveEntityGroup) # noqa: E501
Create or update the Entity Group. When creating Entity Group, platform generates Entity Group Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Entity Group Id will be present in the response. Specify existing Entity Group Id to update the group. Referencing non-existing Entity Group Id will cause 'Not Found' error. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_group_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityGroup body:
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_entity_group_using_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityGroupInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def share_entity_group_to_child_owner_user_group_using_post(self, entity_group_id, user_group_id, role_id, **kwargs): # noqa: E501
"""Share the Entity Group with User group (shareEntityGroupToChildOwnerUserGroup) # noqa: E501
Share the entity group with specified user group using specified role. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_to_child_owner_user_group_using_post(entity_group_id, user_group_id, role_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id that you would like to share. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str user_group_id: A string value representing the Entity(User) Group Id that you would like to share with. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str role_id: A string value representing the Role Id that describes set of permissions you would like to share (read, write, etc). For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.share_entity_group_to_child_owner_user_group_using_post_with_http_info(entity_group_id, user_group_id, role_id, **kwargs) # noqa: E501
else:
(data) = self.share_entity_group_to_child_owner_user_group_using_post_with_http_info(entity_group_id, user_group_id, role_id, **kwargs) # noqa: E501
return data
def share_entity_group_to_child_owner_user_group_using_post_with_http_info(self, entity_group_id, user_group_id, role_id, **kwargs): # noqa: E501
"""Share the Entity Group with User group (shareEntityGroupToChildOwnerUserGroup) # noqa: E501
Share the entity group with specified user group using specified role. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_to_child_owner_user_group_using_post_with_http_info(entity_group_id, user_group_id, role_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id that you would like to share. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str user_group_id: A string value representing the Entity(User) Group Id that you would like to share with. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str role_id: A string value representing the Role Id that describes set of permissions you would like to share (read, write, etc). For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id', 'user_group_id', 'role_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method share_entity_group_to_child_owner_user_group_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `share_entity_group_to_child_owner_user_group_using_post`") # noqa: E501
# verify the required parameter 'user_group_id' is set
if ('user_group_id' not in params or
params['user_group_id'] is None):
raise ValueError("Missing the required parameter `user_group_id` when calling `share_entity_group_to_child_owner_user_group_using_post`") # noqa: E501
# verify the required parameter 'role_id' is set
if ('role_id' not in params or
params['role_id'] is None):
raise ValueError("Missing the required parameter `role_id` when calling `share_entity_group_to_child_owner_user_group_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
if 'user_group_id' in params:
path_params['userGroupId'] = params['user_group_id'] # noqa: E501
if 'role_id' in params:
path_params['roleId'] = params['role_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/{userGroupId}/{roleId}/share', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def share_entity_group_using_post(self, entity_group_id, **kwargs): # noqa: E501
"""Share the Entity Group (shareEntityGroup) # noqa: E501
Share the entity group with certain user group based on the provided Share Group Request. The request is quite flexible and processing of the request involves multiple security checks using platform RBAC feature. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param ShareGroupRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.share_entity_group_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.share_entity_group_using_post_with_http_info(entity_group_id, **kwargs) # noqa: E501
return data
def share_entity_group_using_post_with_http_info(self, entity_group_id, **kwargs): # noqa: E501
"""Share the Entity Group (shareEntityGroup) # noqa: E501
Share the entity group with certain user group based on the provided Share Group Request. The request is quite flexible and processing of the request involves multiple security checks using platform RBAC feature. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param ShareGroupRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_group_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method share_entity_group_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `share_entity_group_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/entityGroup/{entityGroupId}/share', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def unassign_entity_group_from_edge_using_delete(self, edge_id, group_type, entity_group_id, **kwargs): # noqa: E501
"""Unassign entity group from edge (unassignEntityGroupFromEdge) # noqa: E501
Clears assignment of the entity group to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity group and entities inside this group locally. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_group_from_edge_using_delete(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, **kwargs) # noqa: E501
else:
(data) = self.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, **kwargs) # noqa: E501
return data
def unassign_entity_group_from_edge_using_delete_with_http_info(self, edge_id, group_type, entity_group_id, **kwargs): # noqa: E501
"""Unassign entity group from edge (unassignEntityGroupFromEdge) # noqa: E501
Clears assignment of the entity group to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity group and entities inside this group locally. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['edge_id', 'group_type', 'entity_group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unassign_entity_group_from_edge_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'edge_id' is set
if ('edge_id' not in params or
params['edge_id'] is None):
raise ValueError("Missing the required parameter `edge_id` when calling `unassign_entity_group_from_edge_using_delete`") # noqa: E501
# verify the required parameter 'group_type' is set
if ('group_type' not in params or
params['group_type'] is None):
raise ValueError("Missing the required parameter `group_type` when calling `unassign_entity_group_from_edge_using_delete`") # noqa: E501
# verify the required parameter 'entity_group_id' is set
if ('entity_group_id' not in params or
params['entity_group_id'] is None):
raise ValueError("Missing the required parameter `entity_group_id` when calling `unassign_entity_group_from_edge_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'edge_id' in params:
path_params['edgeId'] = params['edge_id'] # noqa: E501
if 'group_type' in params:
path_params['groupType'] = params['group_type'] # noqa: E501
if 'entity_group_id' in params:
path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/edge/{edgeId}/entityGroup/{entityGroupId}/{groupType}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EntityGroup', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| tb_rest_client/api/api_pe/entity_group_controller_api.py | 132,204 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
Add entities to the group (addEntitiesToEntityGroup) # noqa: E501
Add entities to the specified entity group. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'ADD_TO_GROUP' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_entities_to_entity_group_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param list[str] body:
:return: None
If the method is called asynchronously,
returns the request thread.
Add entities to the group (addEntitiesToEntityGroup) # noqa: E501
Add entities to the specified entity group. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'ADD_TO_GROUP' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_entities_to_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param list[str] body:
:return: None
If the method is called asynchronously,
returns the request thread.
Assign entity group to edge (assignEntityGroupToEdge) # noqa: E501
Creates assignment of an existing entity group to an instance of The Edge. Assignment works in async way - first, notification event pushed to edge service queue on platform. Second, remote edge service will receive a copy of assignment entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once entity group will be delivered to edge service, edge will request entities of this group to be send to edge. Once entities will be delivered to edge service, they are going to be available for usage on remote edge instance. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_entity_group_to_edge_using_post(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
Assign entity group to edge (assignEntityGroupToEdge) # noqa: E501
Creates assignment of an existing entity group to an instance of The Edge. Assignment works in async way - first, notification event pushed to edge service queue on platform. Second, remote edge service will receive a copy of assignment entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once entity group will be delivered to edge service, edge will request entities of this group to be send to edge. Once entities will be delivered to edge service, they are going to be available for usage on remote edge instance. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_entity_group_to_edge_using_post_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
Delete Entity Group (deleteEntityGroup) # noqa: E501
Deletes the entity group but does not delete the entities in the group, since they are also present in reserved group 'All'. Referencing non-existing Entity Group Id will cause an error. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'DELETE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_group_using_delete(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Delete Entity Group (deleteEntityGroup) # noqa: E501
Deletes the entity group but does not delete the entities in the group, since they are also present in reserved group 'All'. Referencing non-existing Entity Group Id will cause an error. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'DELETE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_entity_group_using_delete_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Get All Edge Entity Groups by entity type (getAllEdgeEntityGroups) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Entity Type and assigned to the provided Edge entity. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_edge_entity_groups_using_get(edge_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
Get All Edge Entity Groups by entity type (getAllEdgeEntityGroups) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Entity Type and assigned to the provided Edge entity. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_edge_entity_groups_using_get_with_http_info(edge_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
Get Edge Entity Groups by entity type (getEdgeEntityGroups) # noqa: E501
Returns a page of Entity Group Info objects based on the provided Entity Type and assigned to the provided Edge entity. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_entity_groups_using_get(edge_id, group_type, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataEntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Edge Entity Groups by entity type (getEdgeEntityGroups) # noqa: E501
Returns a page of Entity Group Info objects based on the provided Entity Type and assigned to the provided Edge entity. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_edge_entity_groups_using_get_with_http_info(edge_id, group_type, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataEntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Group Entities (getEntities) # noqa: E501
Returns a page of Short Entity View objects that belongs to specified Entity Group Id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entities_using_get(entity_group_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataShortEntityView
If the method is called asynchronously,
returns the request thread.
Get Group Entities (getEntities) # noqa: E501
Returns a page of Short Entity View objects that belongs to specified Entity Group Id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entities_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataShortEntityView
If the method is called asynchronously,
returns the request thread.
Get special group All by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch reserved group 'All' based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_all_by_owner_and_type_using_get(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get special group All by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch reserved group 'All' based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_all_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Entity Group Info (getEntityGroupById) # noqa: E501
Fetch the Entity Group object based on the provided Entity Group Id. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_id_using_get(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Entity Group Info (getEntityGroupById) # noqa: E501
Fetch the Entity Group object based on the provided Entity Group Id. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_id_using_get_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Entity Group by owner, type and name (getEntityGroupByOwnerAndNameAndType) # noqa: E501
Fetch the Entity Group object based on the provided Owner Id, Entity Type and Entity Group name. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_owner_and_name_and_type_using_get(owner_type, owner_id, group_type, group_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:param str group_name: Entity Group name (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Entity Group by owner, type and name (getEntityGroupByOwnerAndNameAndType) # noqa: E501
Fetch the Entity Group object based on the provided Owner Id, Entity Type and Entity Group name. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_group_by_owner_and_name_and_type_using_get_with_http_info(owner_type, owner_id, group_type, group_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:param str group_name: Entity Group name (required)
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by Ids (getDevicesByIds) # noqa: E501
Requested entity groups must be owned by tenant or assigned to customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_ids_using_get(entity_group_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_ids: A list of group ids, separated by comma ',' (required)
:return: list[EntityGroup]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by Ids (getDevicesByIds) # noqa: E501
Requested entity groups must be owned by tenant or assigned to customer which user is performing the request. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_ids_using_get_with_http_info(entity_group_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_ids: A list of group ids, separated by comma ',' (required)
:return: list[EntityGroup]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_owner_and_type_using_get(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by owner and entity type (getEntityGroupsByOwnerAndType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Owner Id and Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_owner_and_type_using_get_with_http_info(owner_type, owner_id, group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str owner_type: Tenant or Customer (required)
:param str owner_id: A string value representing the Tenant or Customer id (required)
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by entity type (getEntityGroupsByType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_type_using_get(group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by entity type (getEntityGroupsByType) # noqa: E501
Fetch the list of Entity Group Info objects based on the provided Entity Type. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously.Entity Group Info extends Entity Group object and adds 'ownerIds' - a list of owner ids. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_by_type_using_get_with_http_info(group_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group_type: Entity Group type (required)
:return: list[EntityGroupInfo]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by Entity Id (getEntityGroupsForEntity) # noqa: E501
Returns a list of groups that contain the specified Entity Id. For example, all device groups that contain specific device. The list always contain at least one element - special group 'All'.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_for_entity_using_get(entity_type, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_type: Entity Group type (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: list[EntityGroupId]
If the method is called asynchronously,
returns the request thread.
Get Entity Groups by Entity Id (getEntityGroupsForEntity) # noqa: E501
Returns a list of groups that contain the specified Entity Id. For example, all device groups that contain specific device. The list always contain at least one element - special group 'All'.You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_groups_for_entity_using_get_with_http_info(entity_type, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_type: Entity Group type (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: list[EntityGroupId]
If the method is called asynchronously,
returns the request thread.
Get Group Entity (getGroupEntity) # noqa: E501
Fetch the Short Entity View object based on the group and entity id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_entity_using_get(entity_group_id, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: ShortEntityView
If the method is called asynchronously,
returns the request thread.
Get Group Entity (getGroupEntity) # noqa: E501
Fetch the Short Entity View object based on the group and entity id. Short Entity View object contains the entity id and number of fields (attributes, telemetry, etc). List of those fields is configurable and defined in the group configuration. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_entity_using_get_with_http_info(entity_group_id, entity_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str entity_id: A string value representing the entity id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: ShortEntityView
If the method is called asynchronously,
returns the request thread.
Get Owners (getOwners) # noqa: E501
Provides a page view of Customers that the current user has READ access to. If the current user is Tenant administrator, the result set also contains the tenant. The call is designed for the UI auto-complete component to show tenant and all possible Customers that the user may select to change the owner of the particular entity or entity group. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_owners_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataContactBasedobject
If the method is called asynchronously,
returns the request thread.
Get Owners (getOwners) # noqa: E501
Provides a page view of Customers that the current user has READ access to. If the current user is Tenant administrator, the result set also contains the tenant. The call is designed for the UI auto-complete component to show tenant and all possible Customers that the user may select to change the owner of the particular entity or entity group. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'READ' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_owners_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the entity group name.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataContactBasedobject
If the method is called asynchronously,
returns the request thread.
Make Entity Group Private (makeEntityGroupPrivate) # noqa: E501
Make the entity group not available for non authorized users. Every group is private by default. This call is useful to hide the group that was previously made public. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_private_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Make Entity Group Private (makeEntityGroupPrivate) # noqa: E501
Make the entity group not available for non authorized users. Every group is private by default. This call is useful to hide the group that was previously made public. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_private_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Make Entity Group Publicly available (makeEntityGroupPublic) # noqa: E501
Make the entity group available for non authorized users. Useful for public dashboards that will be embedded into the public websites. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_public_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Make Entity Group Publicly available (makeEntityGroupPublic) # noqa: E501
Make the entity group available for non authorized users. Useful for public dashboards that will be embedded into the public websites. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.make_entity_group_public_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Remove entities from the group (removeEntitiesFromEntityGroup) # noqa: E501
Removes entities from the specified entity group. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'REMOVE_FROM_GROUP' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_entities_from_entity_group_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param list[str] body:
:return: None
If the method is called asynchronously,
returns the request thread.
Remove entities from the group (removeEntitiesFromEntityGroup) # noqa: E501
Removes entities from the specified entity group. Entity group allows you to group multiple entities of the same entity type (Device, Asset, Customer, User, Dashboard, etc). Entity Group always have an owner - particular Tenant or Customer. Each entity may belong to multiple groups simultaneously. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'REMOVE_FROM_GROUP' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_entities_from_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param list[str] body:
:return: None
If the method is called asynchronously,
returns the request thread.
Create Or Update Entity Group (saveEntityGroup) # noqa: E501
Create or update the Entity Group. When creating Entity Group, platform generates Entity Group Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Entity Group Id will be present in the response. Specify existing Entity Group Id to update the group. Referencing non-existing Entity Group Id will cause 'Not Found' error. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_group_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityGroup body:
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Create Or Update Entity Group (saveEntityGroup) # noqa: E501
Create or update the Entity Group. When creating Entity Group, platform generates Entity Group Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Entity Group Id will be present in the response. Specify existing Entity Group Id to update the group. Referencing non-existing Entity Group Id will cause 'Not Found' error. Entity group name is unique in the scope of owner and entity type. For example, you can't create two tenant device groups called 'Water meters'. However, you may create device and asset group with the same name. And also you may create groups with the same name for two different customers of the same tenant. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_entity_group_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param EntityGroup body:
:return: EntityGroupInfo
If the method is called asynchronously,
returns the request thread.
Share the Entity Group with User group (shareEntityGroupToChildOwnerUserGroup) # noqa: E501
Share the entity group with specified user group using specified role. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_to_child_owner_user_group_using_post(entity_group_id, user_group_id, role_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id that you would like to share. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str user_group_id: A string value representing the Entity(User) Group Id that you would like to share with. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str role_id: A string value representing the Role Id that describes set of permissions you would like to share (read, write, etc). For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Share the Entity Group with User group (shareEntityGroupToChildOwnerUserGroup) # noqa: E501
Share the entity group with specified user group using specified role. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_to_child_owner_user_group_using_post_with_http_info(entity_group_id, user_group_id, role_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id that you would like to share. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str user_group_id: A string value representing the Entity(User) Group Id that you would like to share with. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str role_id: A string value representing the Role Id that describes set of permissions you would like to share (read, write, etc). For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Share the Entity Group (shareEntityGroup) # noqa: E501
Share the entity group with certain user group based on the provided Share Group Request. The request is quite flexible and processing of the request involves multiple security checks using platform RBAC feature. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_using_post(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param ShareGroupRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
Share the Entity Group (shareEntityGroup) # noqa: E501
Share the entity group with certain user group based on the provided Share Group Request. The request is quite flexible and processing of the request involves multiple security checks using platform RBAC feature. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param ShareGroupRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
Unassign entity group from edge (unassignEntityGroupFromEdge) # noqa: E501
Clears assignment of the entity group to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity group and entities inside this group locally. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_group_from_edge_using_delete(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
Unassign entity group from edge (unassignEntityGroupFromEdge) # noqa: E501
Clears assignment of the entity group to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity group and entities inside this group locally. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 python 2 and python 3 compatibility library noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 HTTP header `Content-Type` noqa: E501 noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'edge_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'edge_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'edge_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 verify the required parameter 'page_size' is set noqa: E501 verify the required parameter 'page' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 verify the required parameter 'page_size' is set noqa: E501 verify the required parameter 'page' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: 
E501 noqa: E501 noqa: E501 verify the required parameter 'owner_type' is set noqa: E501 verify the required parameter 'owner_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'owner_type' is set noqa: E501 verify the required parameter 'owner_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 verify the required parameter 'group_name' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_ids' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'owner_type' is set noqa: E501 verify the required parameter 'owner_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'group_type' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_type' is set noqa: E501 verify the required parameter 'entity_id' is set noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: 
E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 verify the required parameter 'entity_id' is set noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'page_size' is set noqa: E501 verify the required parameter 'page' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 HTTP header `Content-Type` noqa: E501 noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 HTTP header `Content-Type` noqa: E501 noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 verify the required parameter 'user_group_id' is set noqa: E501 verify the required parameter 'role_id' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 HTTP header `Content-Type` noqa: E501 noqa: E501 
Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'edge_id' is set noqa: E501 verify the required parameter 'group_type' is set noqa: E501 verify the required parameter 'entity_group_id' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 | 61,861 | en | 0.706666 |
#==========================================================
#
# This prepare the hdf5 datasets of the DRIVE database
#
#============================================================
import os
import h5py
import numpy as np
from PIL import Image
#content/add2/E2/DRIVE_datasets_training_testing
def write_hdf5(arr, outfile):
    """Persist *arr* to *outfile* as an HDF5 file under the dataset name "image"."""
    out = h5py.File(outfile, "w")
    try:
        out.create_dataset("image", data=arr, dtype=arr.dtype)
    finally:
        out.close()
# ------------ Paths of the DRIVE images -------------------------------------
# train
original_imgs_train = "/content/add2/E2/training/images/"
groundTruth_imgs_train = "/content/add2/E2/training/1st_manual/"
borderMasks_imgs_train = "/content/add2/E2/training/mask/"
# test
original_imgs_test = "/content/add2/E2/test/images/"  # fixed: was ".../E2//test/..." (doubled slash)
groundTruth_imgs_test = "/content/add2/E2/test/1st_manual/"
borderMasks_imgs_test = "/content/add2/E2/test/mask/"  # fixed: was missing the leading "/" (relative path)
# ----------------------------------------------------------------------------
# DRIVE database geometry: 20 images per split, RGB, 584x565 pixels.
Nimgs = 20
channels = 3
height = 584
width = 565
dataset_path = "/content/add2/E2/DRIVE_datasets_training_testing/"
def get_datasets(imgs_dir, groundTruth_dir, borderMasks_dir, train_test="null"):
    """Load DRIVE fundus images, vessel ground truth and FOV border masks.

    Parameters
    ----------
    imgs_dir : str
        Directory holding the original fundus images.
    groundTruth_dir : str
        Directory holding the "NN_manual1.gif" vessel segmentations.
    borderMasks_dir : str
        Directory holding the FOV border masks.
    train_test : str
        Either "train" or "test"; selects the border-mask filename pattern.

    Returns
    -------
    tuple of numpy.ndarray
        (imgs, groundTruth, border_masks) shaped (Nimgs, channels, height, width),
        (Nimgs, 1, height, width) and (Nimgs, 1, height, width).

    Raises
    ------
    ValueError
        If ``train_test`` is neither "train" nor "test".
    """
    # Validate upfront instead of calling exit() deep inside the per-file loop
    # (the original aborted the whole interpreter after partial work).
    if train_test not in ("train", "test"):
        raise ValueError("specify if train or test!!")
    imgs = np.empty((Nimgs, height, width, channels))
    groundTruth = np.empty((Nimgs, height, width))
    border_masks = np.empty((Nimgs, height, width))
    for path, subdirs, files in os.walk(imgs_dir):  # list all files, directories in the path
        for i in range(len(files)):
            # original image
            print("original image: " + files[i])
            img = Image.open(imgs_dir + files[i])
            imgs[i] = np.asarray(img)
            # corresponding ground truth (first two chars of the filename are the image id)
            groundTruth_name = files[i][0:2] + "_manual1.gif"
            print("ground truth name: " + groundTruth_name)
            g_truth = Image.open(groundTruth_dir + groundTruth_name)
            groundTruth[i] = np.asarray(g_truth)
            # corresponding border mask (filename pattern differs per split)
            if train_test == "train":
                border_masks_name = files[i][0:2] + "_training_mask.gif"
            else:
                border_masks_name = files[i][0:2] + "_test_mask.gif"
            print("border masks name: " + border_masks_name)
            b_mask = Image.open(borderMasks_dir + border_masks_name)
            border_masks[i] = np.asarray(b_mask)
    print("imgs max: " + str(np.max(imgs)))
    print("imgs min: " + str(np.min(imgs)))
    # Ground truth and masks are expected to be binary 0/255 images.
    assert(np.max(groundTruth) == 255 and np.max(border_masks) == 255)
    assert(np.min(groundTruth) == 0 and np.min(border_masks) == 0)
    print("ground truth and border masks are correctly within pixel value range 0-255 (black-white)")
    # Reshape to channels-first tensors (N, C, H, W).
    imgs = np.transpose(imgs, (0, 3, 1, 2))
    assert(imgs.shape == (Nimgs, channels, height, width))
    groundTruth = np.reshape(groundTruth, (Nimgs, 1, height, width))
    border_masks = np.reshape(border_masks, (Nimgs, 1, height, width))
    assert(groundTruth.shape == (Nimgs, 1, height, width))
    assert(border_masks.shape == (Nimgs, 1, height, width))
    return imgs, groundTruth, border_masks
if not os.path.exists(dataset_path):
    os.makedirs(dataset_path)

# Build and persist the training split.
imgs_train, groundTruth_train, border_masks_train = get_datasets(
    original_imgs_train, groundTruth_imgs_train, borderMasks_imgs_train, "train")
print("saving train datasets")
for arr, tag in ((imgs_train, "imgs"),
                 (groundTruth_train, "groundTruth"),
                 (border_masks_train, "borderMasks")):
    write_hdf5(arr, dataset_path + "DRIVE_dataset_" + tag + "_train.hdf5")

# Build and persist the testing split.
imgs_test, groundTruth_test, border_masks_test = get_datasets(
    original_imgs_test, groundTruth_imgs_test, borderMasks_imgs_test, "test")
print("saving test datasets")
for arr, tag in ((imgs_test, "imgs"),
                 (groundTruth_test, "groundTruth"),
                 (border_masks_test, "borderMasks")):
    write_hdf5(arr, dataset_path + "DRIVE_dataset_" + tag + "_test.hdf5")
| prepare_datasets_DRIVE.py | 4,135 | ========================================================== This prepare the hdf5 datasets of the DRIVE database============================================================content/add2/E2/DRIVE_datasets_training_testing------------Path of the images --------------------------------------------------------------traintest---------------------------------------------------------------------------------------------list all files, directories in the pathoriginalcorresponding ground truthcorresponding border masksreshaping for my standard tensorsgetting the training datasetsgetting the testing datasets | 603 | en | 0.415548 |
import cv2

# Capture from camera index 1; index 0 is usually the built-in camera.
cap = cv2.VideoCapture(1)
cap.set(3, 640)  # CAP_PROP_FRAME_WIDTH
cap.set(4, 480)  # CAP_PROP_FRAME_HEIGHT

# Pre-trained Haar cascade for frontal faces (XML must sit next to this script).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

while True:
    ret, frame = cap.read()
    if not ret:
        # Grab failed (camera unplugged / busy); stop instead of crashing in cvtColor.
        break

    # Haar cascades operate on grayscale images.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # Draw a blue bounding box around every detected face.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| faces.py | 1,244 | WIDTHHEIGHT while True: ret, frame = cap.read() Our operations on the frame come here gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) try: number = len(faces) size = [faces[0][2], faces[0][3]] position = [faces[0][0], faces[0][1]] break except: a = 1 Our operations on the frame come here print(number) print(size) print(position)print(len(faces)) Display the resulting frame When everything done, release the capture | 533 | en | 0.521876 |
#!/usr/bin/env python
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
from clang.cindex import Cursor, CursorKind, TokenKind
from utils import range_dict_relative
import ctypes
import itertools
import re
# Cursor kinds that represent functions and methods.
FUNCTION_KINDS = {
    'FUNCTION_DECL',
    'FUNCTION_TEMPLATE',
    'CXX_METHOD',
    'CONSTRUCTOR',
    'DESTRUCTOR',
    'OBJC_INSTANCE_METHOD_DECL',
    'OBJC_CLASS_METHOD_DECL',
}
# Cursor kinds that behave like classes (can contain members).
CLASS_KINDS = {
    'STRUCT_DECL',
    'UNION_DECL',
    'CLASS_DECL',
    'ENUM_DECL',
    'OBJC_INTERFACE_DECL',
    'OBJC_CATEGORY_DECL',
    'OBJC_PROTOCOL_DECL',
    'OBJC_IMPLEMENTATION_DECL',
    'OBJC_CATEGORY_IMPL_DECL',
    'CLASS_TEMPLATE',
    'CLASS_TEMPLATE_PARTIAL_SPECIALIZATION',
    'NAMESPACE',
}
# (Possibly external) members of CLASS_KINDS.
MEMBER_KINDS = {
    'CXX_METHOD',
    'CONSTRUCTOR',
    'DESTRUCTOR',
    'FIELD_DECL',
    'VAR_DECL',
    'ENUM_CONSTANT_DECL',
}
# Variables and fields.
VAR_KINDS = {
    'OBJC_IVAR_DECL',
    'FIELD_DECL',
    'VAR_DECL',
}
# Capture the ubiquitous GTest-style TEST/TEST_F macros.
GTEST_MACROS = {'TEST', 'TEST_F'}
MACRO_INSTANTIATION = 'MACRO_INSTANTIATION'
OTHER_KINDS = {
    MACRO_INSTANTIATION,
}
# Any cursor of one of the kinds above is recorded in the outline.
ALL_KINDS = set().union(FUNCTION_KINDS, CLASS_KINDS, MEMBER_KINDS, VAR_KINDS, OTHER_KINDS)
# People like adding a '-' by convention, but strip that out.
PRAGMA_MARK_REGEX = re.compile(
    '^[ \t]*#[ \t]*pragma[ \t]+mark[ \t]+(?:-[ \t]*)?(.+)$', re.MULTILINE)
def visit_cursor(libclang, cursor):
    """Convert a libclang *cursor* into an outline-node dict, or return None.

    None is returned for cursor kinds that are not outlined, for symbols that
    come from other (included) files, and for macro instantiations other than
    the GTest TEST/TEST_F macros.
    """
    try:
        kind = cursor.kind.name
    except Exception:
        # Some cursor kinds aren't supported by the Python binding and raise
        # when accessed. Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow it to Exception.
        return None
    if kind not in ALL_KINDS:
        return None
    # Skip symbols from other files.
    if not libclang.clang_Location_isFromMainFile(cursor.location):
        return None
    # Names of function parameters.
    params = None
    # Names of template parameters.
    tparams = None
    children = None
    name = cursor.spelling
    # Display types for variables and typedefs.
    cursor_type = cursor.type.spelling if kind in VAR_KINDS else None
    if kind in FUNCTION_KINDS:
        # We can't use displayname as it also includes the arguments.
        params = []
        tparams = []
        for child in cursor.get_children():
            if child.kind == CursorKind.PARM_DECL:
                # Use the param name, but fall back to the raw type if unnamed.
                params.append(child.spelling or child.type.spelling)
            elif child.kind == CursorKind.TEMPLATE_TYPE_PARAMETER:
                tparams.append(child.spelling)
            # TODO(hansonw): non-type and "template template" params?
    if kind in MEMBER_KINDS:
        # Name should be fully qualified if declared outside the parent.
        if cursor.semantic_parent != cursor.lexical_parent:
            name = cursor.semantic_parent.spelling + '::' + name
    elif kind in CLASS_KINDS:
        # Include template information.
        name = cursor.displayname
        children = []
        for child in cursor.get_children():
            child_outline = visit_cursor(libclang, child)
            if child_outline is not None:
                children.append(child_outline)
    if kind == MACRO_INSTANTIATION:
        params = []
        if name in GTEST_MACROS:
            # Should look like TEST(id, id): skip the macro name token, then
            # expect exactly "( id , id )".
            tokens = list(itertools.islice(cursor.get_tokens(), 1, 6))
            if len(tokens) == 5 and (
                tokens[0].kind == TokenKind.PUNCTUATION and
                tokens[1].kind == TokenKind.IDENTIFIER and
                tokens[2].kind == TokenKind.PUNCTUATION and
                tokens[3].kind == TokenKind.IDENTIFIER and
                tokens[4].kind == TokenKind.PUNCTUATION
            ):
                params = [tokens[1].spelling, tokens[3].spelling]
            else:
                return None
        else:
            # TODO(hansonw): Handle other special macros like DEFINE_ params.
            return None
    ret = {
        'name': name,
        'cursor_kind': kind,
        'cursor_type': cursor_type,
        'extent': range_dict_relative(cursor.extent),
        'params': params,
        'tparams': tparams,
        'children': children,
    }
    # Drop None-valued fields to keep the serialized outline compact.
    return {k: v for k, v in ret.items() if v is not None}
# Scan through the outline tree and insert pragma marks as we pass by them.
def insert_pragma_marks(marks, outline_tree, tree_end=None):
    """Merge pragma-mark nodes into *outline_tree*, ordered by start row.

    *marks* is a stack (last element = earliest mark) and is consumed
    destructively via pop(). Marks are inserted before the first node that
    starts at or after them; marks falling inside a node with children are
    recursively merged into that subtree. When *tree_end* is given, only
    marks starting at or before that row are consumed.
    """
    merged = []
    for node in outline_tree:
        node_start = node['extent']['start']['row']
        # Emit every pending mark that starts at or before this node.
        while marks and marks[-1]['extent']['start']['row'] <= node_start:
            merged.append(marks.pop())
        subtree = node.get('children')
        if subtree:
            node['children'] = insert_pragma_marks(
                marks, subtree, node['extent']['end']['row'])
        merged.append(node)
    # Flush the remaining marks that still belong to this subtree.
    while marks and (tree_end is None or
                     marks[-1]['extent']['start']['row'] <= tree_end):
        merged.append(marks.pop())
    return merged
def get_outline(libclang, translation_unit, contents):
    """Build the outline tree for *translation_unit*.

    Walks the AST with a raw ctypes visitor (collecting nodes via
    visit_cursor), scans *contents* for `#pragma mark` comments, and returns
    the two merged into one list sorted by source position.
    """
    root_cursor = translation_unit.cursor
    # This is the same as Cursor.get_children minus an assert in visitor().
    # This results in a ~2x speedup!
    callback_type = ctypes.CFUNCTYPE(ctypes.c_int, Cursor, Cursor, ctypes.py_object)
    def visitor(child, parent, result):
        # NOTE(review): the raw callback hands back Cursors without the
        # translation-unit backref the binding's properties appear to need;
        # attach it manually — confirm against the clang.cindex version in use.
        child._tu = translation_unit
        child_outline = visit_cursor(libclang, child)
        if child_outline is not None:
            result.append(child_outline)
        return 1 # continue
    result = []
    libclang.clang_visitChildren(root_cursor, callback_type(visitor), result)
    # Look for pragma marks. These are not detectable in the AST.
    line = 0
    lastpos = 0
    pragma_marks = []
    for mark in PRAGMA_MARK_REGEX.finditer(contents):
        # Advance through the text up to this match, counting newlines to
        # translate the character offset into a 0-based row number.
        while lastpos < mark.start():
            if contents[lastpos] == '\n':
                line += 1
            lastpos += 1
        pragma_marks.append({
            'name': mark.group(1),
            'cursor_kind': 'PRAGMA_MARK',
            'extent': {
                'start': {'row': line, 'column': 0},
                'end': {'row': line + 1, 'column': 0},
            },
        })
    # Top-level macro instantiations appear out of order.
    result = sorted(result, key=lambda x: (
        x['extent']['start']['row'],
        x['extent']['start']['column'],
        x['extent']['end']['row'],
        x['extent']['end']['column'],
    ))
    # Convert into a stack for efficient removal.
    pragma_marks.reverse()
    return insert_pragma_marks(pragma_marks, result)
| node_modules/nuclide/pkg/nuclide-clang-rpc/python/outline.py | 6,952 | !/usr/bin/env python Copyright (c) 2015-present, Facebook, Inc. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Function/method cursor kinds. Class-like cursors. (Possibly external) members of CLASS_KINDS. Variables and fields. Capture the ubiquitous GTest-style TEST/TEST_F macros. Record any of the cursor types listed above. People like adding a '-' by convention, but strip that out. Some cursor kinds aren't supported by the Python binding. Skip symbols from other files. Names of function parameters. Names of template parameters. Display types for variables and typedefs. We can't use displayname as it also includes the arguments. Use the param name, but fall back to the raw type if unnamed. TODO(hansonw): non-type and "template template" params? Name should be fully qualified if outside the parent. Include template information. Should look like TEST(id, id). TODO(hansonw): Handle other special macros like DEFINE_ params. Scan through the outline tree and insert pragma marks as we pass by them. Consume all remaining marks included in this subtree. This is the same as Cursor.get_children minus an assert in visitor(). This results in a ~2x speedup! continue Look for pragma marks. These are not detectable in the AST. Top-level macro instantiations appear out of order. Convert into a stack for efficient removal. | 1,418 | en | 0.864255 |
# -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Third party imports
import numpy as np
from scipy import signal
# Local application imports
from mosqito.sq_metrics.loudness.loudness_zwtv._square_and_smooth import (
_square_and_smooth,
)
def _third_octave_levels(sig, fs):
    """3rd octave filtering, squaring, smoothing, level calculation and
    downsampling to temporal resolution: 0,5 ms, i.e. sampling rate: 2 kHz
    See ISO 532-1 section 6.3

    Parameters
    ----------
    sig : numpy.ndarray
        time signal sampled at 48 kHz [pa]
    fs : int
        time signal sampling frequency, must be 48000 Hz

    Returns
    -------
    third_octave_level : numpy.ndarray
        Time-varying band level [dB SPL], shape (28, n_time)
    time_axis : numpy.ndarray
        Time axis [s] of the decimated output, length n_time
    freq : list
        Preferred center frequencies [Hz] of the 28 one-third-octave bands

    Raises
    ------
    ValueError
        If fs is not 48000 Hz.
    """
    # Sampling frequency shall be equal to 48 kHz (as per ISO 532)
    if fs != 48000:
        raise ValueError("""ERROR: Sampling frequency shall be equal to 48 kHz""")
    # Constants
    n_level_band = 28
    # 48000 / 2000 -> keep one sample out of 24 (2 kHz output rate)
    dec_factor = int(fs / 2000)
    # Filter coefficients of one-third-octave-band filters (reference
    # table)
    # ISO 532-1 Table A.1
    third_octave_filter_ref = np.array(
        [[1, 2, 1, 1, -2, 1], [1, 0, -1, 1, -2, 1], [1, -2, 1, 1, -2, 1]]
    )
    # Filter coefficients of one-third-octave-band filters (difference to
    # reference table for 28 one-third-octave-band filters)
    # ISO 532-1 Table A.2
    third_octave_filter = np.array(
        [
            [
                [0, 0, 0, 0, -6.70260e-004, 6.59453e-004],
                [0, 0, 0, 0, -3.75071e-004, 3.61926e-004],
                [0, 0, 0, 0, -3.06523e-004, 2.97634e-004],
            ],
            [
                [0, 0, 0, 0, -8.47258e-004, 8.30131e-004],
                [0, 0, 0, 0, -4.76448e-004, 4.55616e-004],
                [0, 0, 0, 0, -3.88773e-004, 3.74685e-004],
            ],
            [
                [0, 0, 0, 0, -1.07210e-003, 1.04496e-003],
                [0, 0, 0, 0, -6.06567e-004, 5.73553e-004],
                [0, 0, 0, 0, -4.94004e-004, 4.71677e-004],
            ],
            [
                [0, 0, 0, 0, -1.35836e-003, 1.31535e-003],
                [0, 0, 0, 0, -7.74327e-004, 7.22007e-004],
                [0, 0, 0, 0, -6.29154e-004, 5.93771e-004],
            ],
            [
                [0, 0, 0, 0, -1.72380e-003, 1.65564e-003],
                [0, 0, 0, 0, -9.91780e-004, 9.08866e-004],
                [0, 0, 0, 0, -8.03529e-004, 7.47455e-004],
            ],
            [
                [0, 0, 0, 0, -2.19188e-003, 2.08388e-003],
                [0, 0, 0, 0, -1.27545e-003, 1.14406e-003],
                [0, 0, 0, 0, -1.02976e-003, 9.40900e-004],
            ],
            [
                [0, 0, 0, 0, -2.79386e-003, 2.62274e-003],
                [0, 0, 0, 0, -1.64828e-003, 1.44006e-003],
                [0, 0, 0, 0, -1.32520e-003, 1.18438e-003],
            ],
            [
                [0, 0, 0, 0, -3.57182e-003, 3.30071e-003],
                [0, 0, 0, 0, -2.14252e-003, 1.81258e-003],
                [0, 0, 0, 0, -1.71397e-003, 1.49082e-003],
            ],
            [
                [0, 0, 0, 0, -4.58305e-003, 4.15355e-003],
                [0, 0, 0, 0, -2.80413e-003, 2.28135e-003],
                [0, 0, 0, 0, -2.23006e-003, 1.87646e-003],
            ],
            [
                [0, 0, 0, 0, -5.90655e-003, 5.22622e-003],
                [0, 0, 0, 0, -3.69947e-003, 2.87118e-003],
                [0, 0, 0, 0, -2.92205e-003, 2.36178e-003],
            ],
            [
                [0, 0, 0, 0, -7.65243e-003, 6.57493e-003],
                [0, 0, 0, 0, -4.92540e-003, 3.61318e-003],
                [0, 0, 0, 0, -3.86007e-003, 2.97240e-003],
            ],
            [
                [0, 0, 0, 0, -1.00023e-002, 8.29610e-003],
                [0, 0, 0, 0, -6.63788e-003, 4.55999e-003],
                [0, 0, 0, 0, -5.15982e-003, 3.75306e-003],
            ],
            [
                [0, 0, 0, 0, -1.31230e-002, 1.04220e-002],
                [0, 0, 0, 0, -9.02274e-003, 5.73132e-003],
                [0, 0, 0, 0, -6.94543e-003, 4.71734e-003],
            ],
            [
                [0, 0, 0, 0, -1.73693e-002, 1.30947e-002],
                [0, 0, 0, 0, -1.24176e-002, 7.20526e-003],
                [0, 0, 0, 0, -9.46002e-003, 5.93145e-003],
            ],
            [
                [0, 0, 0, 0, -2.31934e-002, 1.64308e-002],
                [0, 0, 0, 0, -1.73009e-002, 9.04761e-003],
                [0, 0, 0, 0, -1.30358e-002, 7.44926e-003],
            ],
            [
                [0, 0, 0, 0, -3.13292e-002, 2.06370e-002],
                [0, 0, 0, 0, -2.44342e-002, 1.13731e-002],
                [0, 0, 0, 0, -1.82108e-002, 9.36778e-003],
            ],
            [
                [0, 0, 0, 0, -4.28261e-002, 2.59325e-002],
                [0, 0, 0, 0, -3.49619e-002, 1.43046e-002],
                [0, 0, 0, 0, -2.57855e-002, 1.17912e-002],
            ],
            [
                [0, 0, 0, 0, -5.91733e-002, 3.25054e-002],
                [0, 0, 0, 0, -5.06072e-002, 1.79513e-002],
                [0, 0, 0, 0, -3.69401e-002, 1.48094e-002],
            ],
            [
                [0, 0, 0, 0, -8.26348e-002, 4.05894e-002],
                [0, 0, 0, 0, -7.40348e-002, 2.24476e-002],
                [0, 0, 0, 0, -5.34977e-002, 1.85371e-002],
            ],
            [
                [0, 0, 0, 0, -1.17018e-001, 5.08116e-002],
                [0, 0, 0, 0, -1.09516e-001, 2.81387e-002],
                [0, 0, 0, 0, -7.85097e-002, 2.32872e-002],
            ],
            [
                [0, 0, 0, 0, -1.67714e-001, 6.37872e-002],
                [0, 0, 0, 0, -1.63378e-001, 3.53729e-002],
                [0, 0, 0, 0, -1.16419e-001, 2.93723e-002],
            ],
            [
                [0, 0, 0, 0, -2.42528e-001, 7.98576e-002],
                [0, 0, 0, 0, -2.45161e-001, 4.43370e-002],
                [0, 0, 0, 0, -1.73972e-001, 3.70015e-002],
            ],
            [
                [0, 0, 0, 0, -3.53142e-001, 9.96330e-002],
                [0, 0, 0, 0, -3.69163e-001, 5.53535e-002],
                [0, 0, 0, 0, -2.61399e-001, 4.65428e-002],
            ],
            [
                [0, 0, 0, 0, -5.16316e-001, 1.24177e-001],
                [0, 0, 0, 0, -5.55473e-001, 6.89403e-002],
                [0, 0, 0, 0, -3.93998e-001, 5.86715e-002],
            ],
            [
                [0, 0, 0, 0, -7.56635e-001, 1.55023e-001],
                [0, 0, 0, 0, -8.34281e-001, 8.58123e-002],
                [0, 0, 0, 0, -5.94547e-001, 7.43960e-002],
            ],
            [
                [0, 0, 0, 0, -1.10165e000, 1.91713e-001],
                [0, 0, 0, 0, -1.23939e000, 1.05243e-001],
                [0, 0, 0, 0, -8.91666e-001, 9.40354e-002],
            ],
            [
                [0, 0, 0, 0, -1.58477e000, 2.39049e-001],
                [0, 0, 0, 0, -1.80505e000, 1.28794e-001],
                [0, 0, 0, 0, -1.32500e000, 1.21333e-001],
            ],
            [
                [0, 0, 0, 0, -2.50630e000, 1.42308e-001],
                [0, 0, 0, 0, -2.19464e000, 2.76470e-001],
                [0, 0, 0, 0, -1.90231e000, 1.47304e-001],
            ],
        ]
    )
    # Filter gain values
    # ISO 532-1 Table A.2
    filter_gain = np.array(
        [
            4.30764e-011,
            8.59340e-011,
            1.71424e-010,
            3.41944e-010,
            6.82035e-010,
            1.36026e-009,
            2.71261e-009,
            5.40870e-009,
            1.07826e-008,
            2.14910e-008,
            4.28228e-008,
            8.54316e-008,
            1.70009e-007,
            3.38215e-007,
            6.71990e-007,
            1.33531e-006,
            2.65172e-006,
            5.25477e-006,
            1.03780e-005,
            2.04870e-005,
            4.05198e-005,
            7.97914e-005,
            1.56511e-004,
            3.04954e-004,
            5.99157e-004,
            1.16544e-003,
            2.27488e-003,
            3.91006e-003,
        ]
    )
    # Definition of the range of preferred filter center frequency
    freq = [
        25,
        31.5,
        40,
        50,
        63,
        80,
        100,
        125,
        160,
        200,
        250,
        315,
        400,
        500,
        630,
        800,
        1000,
        1250,
        1600,
        2000,
        2500,
        3150,
        4000,
        5000,
        6300,
        8000,
        10000,
        12500,
    ]
    n_time = len(sig[::dec_factor])
    time_axis = np.linspace(0, len(sig) / fs, num=n_time)
    third_octave_level = np.zeros((n_level_band, n_time))
    # Loop-invariant constants, hoisted out of the band loop
    tiny_value = 10 ** -12  # guards against log10(0)
    i_ref = 4 * 10 ** -10  # squared reference pressure, (20 µPa)**2
    for i_bands in range(n_level_band):
        # 2nd order filtering (See ISO 532-1 section 6.3 and A.2):
        # the band's SOS coefficients are the reference table minus the
        # per-band difference table
        coeff = third_octave_filter_ref - third_octave_filter[i_bands, :, :]
        sig_filt = filter_gain[i_bands] * signal.sosfilt(coeff, sig)
        # Calculate center frequency of filter
        center_freq = 10 ** ((i_bands - 16) / 10) * 1000
        # Squaring and smoothing of filtered signal (fs is guaranteed 48 kHz)
        sig_filt = _square_and_smooth(sig_filt, center_freq, fs)
        # SPL calculation and decimation
        third_octave_level[i_bands, :] = 10 * np.log10(
            (sig_filt[::dec_factor] + tiny_value) / i_ref
        )
    return third_octave_level, time_axis, freq
| mosqito/sq_metrics/loudness/loudness_zwtv/_third_octave_levels.py | 9,549 | 3rd octave filtering, squaring, smoothing, level calculation and
downsampling to temporal resolution: 0,5 ms, i.e. sampling rate: 2 kHz
See ISO 532-1 section 6.3
Parameters
----------
sig : numpy.ndarray
time signal sampled at 48 kHz[pa]
fs : int
time signal sampling frequency
Outputs
-------
third_octave_levels : numpy.ndarray
Set of time signals filtered per third octave bands
@date Created on Fri May 22 2020
@author martin_g for Eomys
-*- coding: utf-8 -*- Third party imports Local application imports Sampling frequency shall be equal to 48 kHz (as per ISO 532) Constants Initialisation Filter coefficients of one-third-octave-band filters (reference table) ISO 532-1 Table A.1 Filter coefficients of one-third-octave-band filters (difference to reference table for 28 one-third-octave-band filters) ISO 532-1 Table A.2 Filter gain values ISO 532-1 Table A.2 Definition of the range of preferred filter center frequency Initialisation 2nd order fltering (See ISO 532-1 section 6.3 and A.2) Calculate center frequency of filter Squaring and smoothing of filtered signal SPL calculation and decimation | 1,125 | en | 0.699869 |
# -*- coding: utf-8 -*-
import uuid
import pytz
from faker import Faker
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.gis.db.models import PointField
from django.contrib.postgres.indexes import BrinIndex
from django.contrib.postgres.fields import JSONField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from oauthlib.common import generate_token
from oauth2_provider.models import (
Application,
AbstractApplication,
AbstractAccessToken,
AccessToken,
RefreshToken
)
from foundation.constants import *
class ProblemDataSheetManager(models.Manager):
    """Custom manager for the `ProblemDataSheet` model."""

    def delete_all(self):
        """Delete every `ProblemDataSheet` row, one instance at a time.

        Per-instance deletion (rather than a single queryset ``.delete()``)
        keeps any per-object ``delete()`` overrides and signals firing; see
        Django's QuerySet.delete() documentation.
        """
        # The original iterated `items.all()` on an already-evaluated
        # queryset; a single .all() is sufficient.
        for item in ProblemDataSheet.objects.all():
            item.delete()
class ProblemDataSheet(models.Model):
    """
    Class model represents danger / destructive element to a production crop.
    Special thanks:
    (1) Preventing, Diagnosing, and Correcting Common Houseplant Problems via URL
    https://extension.psu.edu/preventing-diagnosing-and-correcting-common-houseplant-problems
    """
    '''
    Metadata
    '''
    class Meta:
        app_label = 'foundation'
        db_table = 'mika_problem_data_sheet'
        verbose_name = _('Problem Data Sheet')
        verbose_name_plural = _('Problem Data Sheets')
        # No default add/change/delete/view permissions are generated.
        default_permissions = ()
        permissions = (
            # ("can_get_opening_hours_specifications", "Can get opening hours specifications"),
            # ("can_get_opening_hours_specification", "Can get opening hours specifications"),
            # ("can_post_opening_hours_specification", "Can create opening hours specifications"),
            # ("can_put_opening_hours_specification", "Can update opening hours specifications"),
            # ("can_delete_opening_hours_specification", "Can delete opening hours specifications"),
        )
    '''
    Constants & Choices
    '''
    # Closed set of problem categories, stored as small integers in `type_of`.
    class TYPE_OF:
        PEST = 1
        DISEASE = 2
        ABIOTIC = 3
        NONE = 4
    TYPE_OF_CHOICES = (
        (TYPE_OF.PEST, _('Pest')),
        (TYPE_OF.DISEASE, _('Disease')),
        (TYPE_OF.ABIOTIC, _('Abiotic')),
        (TYPE_OF.NONE, _('None')),
    )
    '''
    Object Managers
    '''
    objects = ProblemDataSheetManager()
    '''
    Fields
    '''
    #
    # Internal Related Fields
    #
    # Unique URL identifier; not editable through forms/admin (editable=False).
    slug = models.SlugField(
        _("Slug"),
        help_text=_('The unique slug used for this crop when accessing details page.'),
        max_length=127,
        blank=True,
        null=False,
        db_index=True,
        unique=True,
        editable=False,
    )
    # NOTE(review): help_text says "variety name of the crop" — looks copied
    # from a crop model; confirm the intended meaning for a problem sheet.
    text = models.CharField(
        _("Text"),
        max_length=127,
        help_text=_('The variety name of the crop.'),
        blank=True,
        null=True,
        db_index=True,
    )
    # Category of the problem; one of TYPE_OF_CHOICES above.
    type_of = models.PositiveSmallIntegerField(
        _("Type of"),
        help_text=_('The type of production crop problem.'),
        blank=False,
        null=False,
        choices=TYPE_OF_CHOICES,
    )
    '''
    Methods
    '''
    def __str__(self):
        # String representation is the unique slug.
        return str(self.slug)
| mikaponics/foundation/models/problem_data_sheet.py | 3,923 | Class model represents danger / destructive element to a production crop.
Special thanks:
(1) Preventing, Diagnosing, and Correcting Common Houseplant Problems via URL
https://extension.psu.edu/preventing-diagnosing-and-correcting-common-houseplant-problems
-*- coding: utf-8 -*- def seed(self, user, product, length=25): results = [] faker = Faker('en_CA') for i in range(0,length): farm = ProblemDataSheet.objects.create( name = faker.domain_word(), description = faker.sentence(nb_words=6, variable_nb_words=True, ext_word_list=None), user = user, product = product, ) results.append(farm) return results ("can_get_opening_hours_specifications", "Can get opening hours specifications"), ("can_get_opening_hours_specification", "Can get opening hours specifications"), ("can_post_opening_hours_specification", "Can create opening hours specifications"), ("can_put_opening_hours_specification", "Can update opening hours specifications"), ("can_delete_opening_hours_specification", "Can delete opening hours specifications"), Internal Related Fields | 1,145 | en | 0.714441 |
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
from . import util
from inferlib import jwlib
# Module identity and help text; both are fed to util.base_argparser below.
MODULE_NAME = __name__
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
ant [options] [target]
Analysis examples:
infer -- ant compile'''
# Languages this capture module handles.
LANG = ['java']
def gen_instance(*args):
    """Factory: build an AntCapture from the given arguments."""
    return AntCapture(*args)
# This creates an empty argparser for the module, which provides only
# description/usage information and no arguments.
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class AntCapture:
    """Capture module for infer on builds driven by Ant.

    Runs ``ant -verbose``, scans its output for javac compilation
    arguments and turns each compilation unit into an infer command.
    """

    def __init__(self, args, cmd):
        self.args = args
        util.log_java_version()
        logging.info(util.run_cmd_ignore_fail(['ant', '-version']))
        # TODO: make the extraction of targets smarter
        self.build_cmd = ['ant', '-verbose'] + cmd[1:]

    def is_interesting(self, content):
        """Return True for arguments worth keeping: quoted ones or .java files."""
        return self.is_quoted(content) or content.endswith('.java')

    def is_quoted(self, argument):
        """Return True if the argument is wrapped in single quotes.

        Note: a bare two-character '' (empty quoted string) is deliberately
        not considered quoted (len > 2), matching the original behavior.
        """
        quote = '\''
        return (len(argument) > 2
                and argument[0] == quote
                and argument[-1] == quote)

    def remove_quotes(self, argument):
        """Strip surrounding single quotes, if present."""
        if self.is_quoted(argument):
            return argument[1:-1]
        return argument

    def _flush(self, javac_arguments, calls):
        """Convert accumulated javac args into an infer command, if any.

        Returns a fresh (empty) argument list.
        """
        if javac_arguments:
            calls.append(jwlib.create_infer_command(self.args, javac_arguments))
        return []

    def get_infer_commands(self, verbose_output):
        """Scan ant's verbose output and build one infer capture command
        per javac invocation found in it.
        """
        javac_pattern = '[javac]'
        argument_start_pattern = 'Compilation arguments'
        calls = []
        javac_arguments = []
        collect = False
        for line in verbose_output:
            if javac_pattern in line:
                if argument_start_pattern in line:
                    # A new compilation unit starts: flush the previous one.
                    collect = True
                    javac_arguments = self._flush(javac_arguments, calls)
                if collect:
                    pos = line.index(javac_pattern) + len(javac_pattern)
                    content = line[pos:].strip()
                    if self.is_interesting(content):
                        javac_arguments.append(self.remove_quotes(content))
        # Flush the trailing compilation unit, if any.
        javac_arguments = self._flush(javac_arguments, calls)
        return calls

    def capture(self):
        """Run the build, derive infer commands and execute them."""
        cmds = self.get_infer_commands(util.get_build_output(self.build_cmd))
        clean_cmd = '%s clean' % self.build_cmd[0]
        return util.run_compilation_commands(cmds, clean_cmd)
| infer/lib/python/inferlib/capture/ant.py | 2,881 | Copyright (c) 2015 - present Facebook, Inc. All rights reserved. This source code is licensed under the BSD style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. This creates an empty argparser for the module, which provides only description/usage information and no arguments. TODO: make the extraction of targets smarter | 444 | en | 0.866611 |
#!/usr/bin/env python
# --coding:utf-8--
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import logging
from nebula2.common.ttypes import ErrorCode
from nebula2.Exception import (
AuthFailedException,
IOErrorException,
NotValidConnectionException,
InValidHostname,
)
from nebula2.data.ResultSet import ResultSet
from nebula2.gclient.net.AuthResult import AuthResult
from nebula2.gclient.net.Session import Session
from nebula2.gclient.net.Connection import Connection
from nebula2.gclient.net.ConnectionPool import ConnectionPool
# NOTE(review): configuring the root logger from a library __init__ overrides
# the host application's logging setup; the logging HOWTO recommends a
# NullHandler for libraries instead — confirm this is intentional.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)-8s [%(filename)s:%(lineno)d]:%(message)s')
| nebula2/gclient/net/__init__.py | 814 | !/usr/bin/env python --coding:utf-8-- Copyright (c) 2020 vesoft inc. All rights reserved. This source code is licensed under Apache 2.0 License, attached with Common Clause Condition 1.0, found in the LICENSES directory. | 220 | en | 0.862372 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
A family of functions used by CurvefittingAssessor
"""
import numpy as np
# Registry mapping model name -> model callable; filled in as each model
# function is defined below.
all_models = {}
# Per-model parameter value lists (presumably curve-fit starting points —
# TODO confirm against the assessor that consumes them).
model_para = {}
# Number of free parameters per model.
model_para_num = {}
# Names of the models combined by the curve-fitting assessor.
curve_combination_models = ['vap', 'pow3', 'linear', 'logx_linear', 'dr_hill_zero_background', 'log_power', 'pow4', 'mmf',
                            'exp4', 'ilog2', 'weibull', 'janoschek']
def vap(x, a, b, c):
    """Vapor pressure model.

    Parameters
    ----------
    x : int
    a : float
    b : float
    c : float

    Returns
    -------
    float
        np.exp(a + b/x + c*np.log(x))
    """
    exponent = a + b / x + c * np.log(x)
    return np.exp(exponent)
# Register 'vap': callable, parameter values and parameter count.
all_models['vap'] = vap
model_para['vap'] = [-0.622028, -0.470050, 0.042322]
model_para_num['vap'] = 3
def pow3(x, c, a, alpha):
    """Power-law model pow3.

    Parameters
    ----------
    x : int
    c : float
    a : float
    alpha : float

    Returns
    -------
    float
        c - a * x**(-alpha)
    """
    decay = a * x ** (-alpha)
    return c - decay
# Register 'pow3': callable, parameter values and parameter count.
all_models['pow3'] = pow3
model_para['pow3'] = [0.84, 0.52, 0.01]
model_para_num['pow3'] = 3
def linear(x, a, b):
    """Straight-line model.

    Parameters
    ----------
    x : int
    a : float
    b : float

    Returns
    -------
    float
        a*x + b
    """
    slope_term = a * x
    return slope_term + b
# Register 'linear': callable, parameter values and parameter count.
all_models['linear'] = linear
model_para['linear'] = [1., 0]
model_para_num['linear'] = 2
def logx_linear(x, a, b):
    """Model linear in log(x).

    Parameters
    ----------
    x : int
    a : float
    b : float

    Returns
    -------
    float
        a * np.log(x) + b
    """
    log_x = np.log(x)
    return a * log_x + b
# Register 'logx_linear': callable, parameter values and parameter count.
all_models['logx_linear'] = logx_linear
model_para['logx_linear'] = [0.378106, 0.046506]
model_para_num['logx_linear'] = 2
def dr_hill_zero_background(x, theta, eta, kappa):
    """Dose-response Hill model with zero background.

    Parameters
    ----------
    x : int
    theta : float
    eta : float
    kappa : float

    Returns
    -------
    float
        (theta * x**eta) / (kappa**eta + x**eta)
    """
    numerator = theta * x ** eta
    denominator = kappa ** eta + x ** eta
    return numerator / denominator
# Register 'dr_hill_zero_background': callable, parameter values and count.
all_models['dr_hill_zero_background'] = dr_hill_zero_background
model_para['dr_hill_zero_background'] = [0.772320, 0.586449, 2.460843]
model_para_num['dr_hill_zero_background'] = 3
def log_power(x, a, b, c):
    """Logistic power model.

    Parameters
    ----------
    x : int
    a : float
    b : float
    c : float

    Returns
    -------
    float
        a / (1. + (x/np.exp(b))**c)
    """
    scaled = (x / np.exp(b)) ** c
    return a / (1. + scaled)
# Register 'log_power': callable, parameter values and parameter count.
all_models['log_power'] = log_power
model_para['log_power'] = [0.77, 2.98, -0.51]
model_para_num['log_power'] = 3
def pow4(x, alpha, a, b, c):
    """Shifted power-law model pow4.

    Parameters
    ----------
    x : int
    alpha : float
    a : float
    b : float
    c : float

    Returns
    -------
    float
        c - (a*x + b)**-alpha
    """
    base = a * x + b
    return c - base ** -alpha
# Register 'pow4': callable, parameter values and parameter count.
all_models['pow4'] = pow4
model_para['pow4'] = [0.1, 200, 0., 0.8]
model_para_num['pow4'] = 4
def mmf(x, alpha, beta, kappa, delta):
    """Morgan-Mercer-Flodin growth model.

    http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm

    Parameters
    ----------
    x : int
    alpha : float
    beta : float
    kappa : float
    delta : float

    Returns
    -------
    float
        alpha - (alpha - beta) / (1. + (kappa * x)**delta)
    """
    span = alpha - beta
    return alpha - span / (1. + (kappa * x) ** delta)
# Register 'mmf': callable, parameter values and parameter count.
all_models['mmf'] = mmf
model_para['mmf'] = [0.7, 0.1, 0.01, 5]
model_para_num['mmf'] = 4
def exp4(x, c, a, b, alpha):
    """Exponential model exp4.

    Parameters
    ----------
    x : int
    c : float
    a : float
    b : float
    alpha : float

    Returns
    -------
    float
        c - np.exp(-a*(x**alpha) + b)
    """
    exponent = -a * (x ** alpha) + b
    return c - np.exp(exponent)
# Register 'exp4': callable, parameter values and parameter count.
all_models['exp4'] = exp4
model_para['exp4'] = [0.7, 0.8, -0.8, 0.3]
model_para_num['exp4'] = 4
def ilog2(x, c, a):
    """Inverse-log model ilog2.

    Parameters
    ----------
    x : int
    c : float
    a : float

    Returns
    -------
    float
        c - a / np.log(x)
    """
    shrink = a / np.log(x)
    return c - shrink
# Register 'ilog2': callable, parameter values and parameter count.
all_models['ilog2'] = ilog2
model_para['ilog2'] = [0.78, 0.43]
model_para_num['ilog2'] = 2
def weibull(x, alpha, beta, kappa, delta):
    """Weibull growth model.

    http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm

    Parameters
    ----------
    x : int
    alpha : float
    beta : float
    kappa : float
    delta : float

    Returns
    -------
    float
        alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)
    """
    span = alpha - beta
    return alpha - span * np.exp(-(kappa * x) ** delta)
# Register 'weibull': callable, parameter values and parameter count.
all_models['weibull'] = weibull
model_para['weibull'] = [0.7, 0.1, 0.01, 1]
model_para_num['weibull'] = 4
def janoschek(x, a, beta, k, delta):
    """Janoschek growth model.

    http://www.pisces-conservation.com/growthhelp/janoschek.htm

    Parameters
    ----------
    x : int
    a : float
    beta : float
    k : float
    delta : float

    Returns
    -------
    float
        a - (a - beta) * np.exp(-k * x**delta)
    """
    span = a - beta
    return a - span * np.exp(-k * x ** delta)
# Register 'janoschek': callable, parameter values and parameter count.
all_models['janoschek'] = janoschek
model_para['janoschek'] = [0.73, 0.07, 0.355, 0.46]
model_para_num['janoschek'] = 4
| src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py | 5,090 | dr hill zero background
Parameters
----------
x : int
theta : float
eta : float
kappa : float
Returns
-------
float
(theta* x**eta) / (kappa**eta + x**eta)
exp4
Parameters
----------
x : int
c : float
a : float
b : float
alpha : float
Returns
-------
float
c - np.exp(-a*(x**alpha)+b)
ilog2
Parameters
----------
x : int
c : float
a : float
Returns
-------
float
c - a / np.log(x)
http://www.pisces-conservation.com/growthhelp/janoschek.htm
Parameters
----------
x : int
a : float
beta : float
k : float
delta : float
Returns
-------
float
a - (a - beta) * np.exp(-k*x**delta)
linear
Parameters
----------
x : int
a : float
b : float
Returns
-------
float
a*x + b
"logistic power
Parameters
----------
x : int
a : float
b : float
c : float
Returns
-------
float
a/(1.+(x/np.exp(b))**c)
logx linear
Parameters
----------
x : int
a : float
b : float
Returns
-------
float
a * np.log(x) + b
Morgan-Mercer-Flodin
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
Parameters
----------
x : int
alpha : float
beta : float
kappa : float
delta : float
Returns
-------
float
alpha - (alpha - beta) / (1. + (kappa * x)**delta)
pow3
Parameters
----------
x : int
c : float
a : float
alpha : float
Returns
-------
float
c - a * x**(-alpha)
pow4
Parameters
----------
x : int
alpha : float
a : float
b : float
c : float
Returns
-------
float
c - (a*x+b)**-alpha
Vapor pressure model
Parameters
----------
x : int
a : float
b : float
c : float
Returns
-------
float
np.exp(a+b/x+c*np.log(x))
Weibull model
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
Parameters
----------
x : int
alpha : float
beta : float
kappa : float
delta : float
Returns
-------
float
alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)
A family of functions used by CurvefittingAssessor
Copyright (c) Microsoft Corporation. Licensed under the MIT license. | 1,959 | en | 0.075981 |
from ourstylePy import data
def our_colours(colours=()):
    '''
    Extract hexcodes for our colours.

    If passed a string, returns the matching hexcode.
    If passed a list of names, returns a list of hexcodes.
    If passed nothing (or an empty sequence), returns the full mapping.
    Method from https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2.

    - colours, str or list of strings

    Examples:
    our_colours()
    our_colours('blue')
    our_colours(['green', 'blue', 'green'])
    '''
    # Immutable empty default avoids the mutable-default-argument pitfall;
    # len(()) == 0, so behavior is unchanged for callers.
    if len(colours) == 0:
        return data.our_colours_raw
    elif isinstance(colours, str):
        return data.our_colours_raw[colours]
    else:
        return [data.our_colours_raw[name] for name in colours]
def our_colors(colours=[]):
    '''
    US-spelling alias: delegates straight to our_colours().
    '''
    return our_colours(colours)
| ourstylePy/our_colours.py | 837 | Alias for our_colours()
Extract hexcodes for our colours
If passed a sting, returns the matching hexcode.
If passed a list, returns a list of hexcodes.
Method from https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2.
- colours, list of strings
Examples:
data.our_colours_raw
our_colours()
our_colours('green', 'blue', 'green')
our_colours('not a colour', 'also not a colour', 'green')
our_colors('blue') | 427 | en | 0.273035 |
from bayesianABTest import sampleSuccessRateForBinomial
from numpy import mean
def bestOfFive(A, B, C, D, E, F):
    """Fraction of samples in which A strictly beats every other option.

    Despite the name, A is compared against five rivals (six arms total).
    All arguments are arrays of posterior samples of equal length.
    """
    wins = A > B
    for rival in (C, D, E, F):
        wins = wins & (A > rival)
    return mean(wins)
############# Example: Binomial Distribution #############
# NOTE(review): the print statements below are Python 2 syntax; this script
# will not run under Python 3 without converting them to print() calls.
# Actual data for all cases
installs = [986,1013,959,968,1029,1014]
returns = [340,298,274,287,325,291]
# Posterior samples of the success rate for each of the six variants
A = sampleSuccessRateForBinomial(installs[0],returns[0])
B = sampleSuccessRateForBinomial(installs[1],returns[1])
C = sampleSuccessRateForBinomial(installs[2],returns[2])
D = sampleSuccessRateForBinomial(installs[3],returns[3])
E = sampleSuccessRateForBinomial(installs[4],returns[4])
F = sampleSuccessRateForBinomial(installs[5],returns[5])
# Probability that each variant is the best of the six
A_best = bestOfFive(A,B,C,D,E,F)
B_best = bestOfFive(B,A,C,D,E,F)
C_best = bestOfFive(C,B,A,D,E,F)
D_best = bestOfFive(D,B,C,A,E,F)
E_best = bestOfFive(E,B,C,D,A,F)
F_best = bestOfFive(F,B,C,D,E,A)
# Report results ("20".."25" presumably label the variants — TODO confirm)
print "The probability of 20 being the best choice is {}".format(A_best)
print "The probability of 21 being the best choice is {}".format(B_best)
print "The probability of 22 being the best choice is {}".format(C_best)
print "The probability of 23 being the best choice is {}".format(D_best)
print "The probability of 24 being the best choice is {}".format(E_best)
print "The probability of 25 being the best choice is {}".format(F_best)
| code/examples/example_mikhail.py | 1,355 | Example: Binomial Distribution Actual data for all cases Get samples from the posterior | 88 | en | 0.239216 |
__version__ = "2.2.3"

# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
    import absl.logging
    absl.logging.set_verbosity('info')
    absl.logging.set_stderrthreshold('info')
    absl.logging._warn_preinit_stderr = False
except Exception:
    # Best effort: absl may be absent or its private internals may change.
    # `except Exception` (rather than a bare `except:`) avoids swallowing
    # SystemExit/KeyboardInterrupt raised during import.
    pass

import logging

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
from .data import (is_sklearn_available,
InputExample, InputFeatures, DataProcessor,
glue_output_modes, glue_convert_examples_to_features,
glue_processors, glue_tasks_num_labels,
xnli_output_modes, xnli_processors, xnli_tasks_num_labels,
squad_convert_examples_to_features, SquadFeatures,
SquadExample, SquadV1Processor, SquadV2Processor)
if is_sklearn_available():
from .data import glue_compute_metrics, xnli_compute_metrics
# ETRI modified ver
from .etri_tf_tokenization import FullTokenizer
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer, MecabTokenizer, CharacterTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
from .tokenization_albert import AlbertTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_t5 import T5Tokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_albert import AlbertConfig, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_camembert import CamembertConfig, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_t5 import T5Config, T5_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
AdaptiveEmbedding,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForTokenClassification,
XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple,
XLNetForQuestionAnswering, load_tf_weights_in_xlnet,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
RobertaForTokenClassification,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertPreTrainedModel, DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DistilBertForTokenClassification,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_camembert import (CamembertForMaskedLM, CamembertModel,
CamembertForSequenceClassification, CamembertForMultipleChoice,
CamembertForTokenClassification,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_encoder_decoder import PreTrainedEncoderDecoder, Model2Model
from .modeling_t5 import (T5PreTrainedModel, T5Model, T5WithLMHeadModel,
load_tf_weights_in_t5,
T5_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import (AlbertPreTrainedModel, AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification,
AlbertForQuestionAnswering,
load_tf_weights_in_albert, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
# Optimization
from .optimization import (AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup)
# TensorFlow
if is_tf_available():
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, TFSequenceSummary, shape_list
from .modeling_tf_auto import (TFAutoModel, TFAutoModelForSequenceClassification, TFAutoModelForQuestionAnswering,
TFAutoModelWithLMHead)
from .modeling_tf_bert import (TFBertPreTrainedModel, TFBertMainLayer, TFBertEmbeddings,
TFBertModel, TFBertForPreTraining,
TFBertForMaskedLM, TFBertForNextSentencePrediction,
TFBertForSequenceClassification, TFBertForMultipleChoice,
TFBertForTokenClassification, TFBertForQuestionAnswering,
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_gpt2 import (TFGPT2PreTrainedModel, TFGPT2MainLayer,
TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel,
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_openai import (TFOpenAIGPTPreTrainedModel, TFOpenAIGPTMainLayer,
TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel,
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_transfo_xl import (TFTransfoXLPreTrainedModel, TFTransfoXLMainLayer,
TFTransfoXLModel, TFTransfoXLLMHeadModel,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_xlnet import (TFXLNetPreTrainedModel, TFXLNetMainLayer,
TFXLNetModel, TFXLNetLMHeadModel,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetForQuestionAnsweringSimple,
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_xlm import (TFXLMPreTrainedModel, TFXLMMainLayer,
TFXLMModel, TFXLMWithLMHeadModel,
TFXLMForSequenceClassification,
TFXLMForQuestionAnsweringSimple,
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_roberta import (TFRobertaPreTrainedModel, TFRobertaMainLayer,
TFRobertaModel, TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_distilbert import (TFDistilBertPreTrainedModel, TFDistilBertMainLayer,
TFDistilBertModel, TFDistilBertForMaskedLM,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForQuestionAnswering,
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_ctrl import (TFCTRLPreTrainedModel, TFCTRLModel,
TFCTRLLMHeadModel,
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_albert import (TFAlbertPreTrainedModel, TFAlbertModel, TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_t5 import (TFT5PreTrainedModel, TFT5Model, TFT5WithLMHeadModel,
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP)
# Optimization
from .optimization_tf import (WarmUp, create_optimizer, AdamWeightDecay, GradientAccumulator)
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
load_tf2_model_in_pytorch_model)
if not is_tf_available() and not is_torch_available():
logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used.")
| transformers/__init__.py | 12,478 | Work around to update TensorFlow's absl.logging threshold which alters the default Python logging output behavior when present. see: https://github.com/abseil/abseil-py/issues/99 and: https://github.com/tensorflow/tensorflow/issues/26691issuecomment-500369493 pylint: disable=invalid-name Files and general utilities ETRI modified ver Tokenizers Configurations Modeling Optimization TensorFlow Optimization TF 2.0 <=> PyTorch conversion utilities | 446 | en | 0.660837 |
from ..http import dump_header
from ..http import parse_set_header
from ..utils import environ_property
from ..utils import header_property
class CORSRequestMixin(object):
    """A mixin for :class:`~werkzeug.wrappers.BaseRequest` subclasses
    that adds descriptors for Cross Origin Resource Sharing (CORS)
    headers.
    .. versionadded:: 1.0
    """
    # Each descriptor below reads a browser-supplied CORS header straight
    # from the WSGI environ; these are request-side values, so they are
    # read-only in practice (the browser sets them, the app inspects them).
    origin = environ_property(
        "HTTP_ORIGIN",
        doc=(
            "The host that the request originated from. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_origin` on"
            " the response to indicate which origins are allowed."
        ),
    )
    # Parsed into a HeaderSet by parse_set_header (comma-separated list).
    access_control_request_headers = environ_property(
        "HTTP_ACCESS_CONTROL_REQUEST_HEADERS",
        load_func=parse_set_header,
        doc=(
            "Sent with a preflight request to indicate which headers"
            " will be sent with the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_headers`"
            " on the response to indicate which headers are allowed."
        ),
    )
    # Plain string (a single HTTP method name); no load_func needed.
    access_control_request_method = environ_property(
        "HTTP_ACCESS_CONTROL_REQUEST_METHOD",
        doc=(
            "Sent with a preflight request to indicate which method"
            " will be used for the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_methods`"
            " on the response to indicate which methods are allowed."
        ),
    )
class CORSResponseMixin(object):
    """A mixin for :class:`~werkzeug.wrappers.BaseResponse` subclasses
    that adds descriptors for Cross Origin Resource Sharing (CORS)
    headers.
    .. versionadded:: 1.0
    """
    # Boolean-style header: per the CORS spec its mere presence (value
    # "true") is what matters, hence the membership test below.
    @property
    def access_control_allow_credentials(self):
        """Whether credentials can be shared by the browser to
        JavaScript code. As part of the preflight request it indicates
        whether credentials can be used on the cross origin request.
        """
        return "Access-Control-Allow-Credentials" in self.headers
    @access_control_allow_credentials.setter
    def access_control_allow_credentials(self, value):
        # Only the exact value True emits the header; anything else
        # (False, None, truthy strings) removes it entirely.
        if value is True:
            self.headers["Access-Control-Allow-Credentials"] = "true"
        else:
            self.headers.pop("Access-Control-Allow-Credentials", None)
    access_control_allow_headers = header_property(
        "Access-Control-Allow-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be sent with the cross origin request.",
    )
    access_control_allow_methods = header_property(
        "Access-Control-Allow-Methods",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which methods can be used for the cross origin request.",
    )
    # NOTE(review): reading this yields a HeaderSet because of
    # parse_set_header, although the CORS spec sends a single origin or
    # "*" in this header -- confirm callers expect set semantics.
    access_control_allow_origin = header_property(
        "Access-Control-Allow-Origin",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="The origins that may make cross origin requests.",
    )
    access_control_expose_headers = header_property(
        "Access-Control-Expose-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be shared by the browser to JavaScript code.",
    )
    access_control_max_age = header_property(
        "Access-Control-Max-Age",
        load_func=int,
        dump_func=str,
        doc="The maximum age in seconds the access control settings can be cached for.",
    )
| venv/Lib/site-packages/werkzeug/wrappers/cors.py | 3,512 | A mixin for :class:`~werkzeug.wrappers.BaseRequest` subclasses
that adds descriptors for Cross Origin Resource Sharing (CORS)
headers.
.. versionadded:: 1.0
A mixin for :class:`~werkzeug.wrappers.BaseResponse` subclasses
that adds descriptors for Cross Origin Resource Sharing (CORS)
headers.
.. versionadded:: 1.0
Whether credentials can be shared by the browser to
JavaScript code. As part of the preflight request it indicates
whether credentials can be used on the cross origin request. | 492 | en | 0.627484 |
import os
import RPi.GPIO as gpio
import time
import random
from mesafe import distance
motorhizi = 2.5  # "motor speed": divisor applied to move durations in ileri/geri/sol/sag
hiz = 100  # "speed": default PWM duty cycle (percent) for the drive motors
aci2 = aci3 = aci4 = 6  # "angle": servo duty-cycle presets -- presumably centre positions, TODO confirm (re-assigned at module end)
aci = 5.5  # pan-servo centre duty cycle (5.5 is also used as centre in kontrol/cizgi)
in4 = 26  # stepper driver inputs, BCM pin numbering
in3 = 4
in2 = 12
in1 = 8
solled = 9  # left LED pin ("sol" = left)
sagled = 11  # right LED pin ("sag" = right)
gpio.setwarnings(False)  # silence channel-reuse warnings; init() repeats this each call
def init():
    """Configure every pin this robot uses (BCM numbering) and drive all outputs low."""
    gpio.setwarnings(False)
    gpio.setmode(gpio.BCM)
    # Motor, stepper, buzzer and LED pins are all plain outputs.
    for out_pin in (22, 27, 17, 18, in4, in3, in2, in1, 21, solled, sagled):
        gpio.setup(out_pin, gpio.OUT)
    # Line-follower sensors.
    for sensor_pin in (23, 24):
        gpio.setup(sensor_pin, gpio.IN)
    # Start from a known-quiet state: everything off.
    for out_pin in (22, 18, 17, 27, in4, in3, in2, in1, 21, solled, sagled):
        gpio.output(out_pin, 0)
def ileri(tf, ff):
    """Drive forward for tf/motorhizi seconds at PWM duty ff, then release the pins."""
    init()
    for low_pin in (17, 22):  # hold the reverse inputs low
        gpio.output(low_pin, 0)
    pwm_a = gpio.PWM(27, 50)
    pwm_b = gpio.PWM(18, 50)
    pwm_a.start(ff)
    pwm_b.start(ff)
    time.sleep(float(tf) / motorhizi)
    gpio.cleanup()
def geri(tf, ff):
    """Drive backward for tf/motorhizi seconds at PWM duty ff, then release the pins."""
    init()
    for low_pin in (18, 27):  # hold the forward inputs low
        gpio.output(low_pin, 0)
    pwm_a = gpio.PWM(22, 50)
    pwm_b = gpio.PWM(17, 50)
    pwm_a.start(ff)
    pwm_b.start(ff)
    time.sleep(float(tf) / motorhizi)
    gpio.cleanup()
def sol(tf, ff):
    """Turn left ("sol") for tf/motorhizi seconds at PWM duty ff."""
    init()
    for low_pin in (17, 27):
        gpio.output(low_pin, 0)
    pwm_a = gpio.PWM(22, 50)
    pwm_b = gpio.PWM(18, 50)
    pwm_a.start(ff)
    pwm_b.start(ff)
    time.sleep(float(tf) / motorhizi)
    gpio.cleanup()
def sag(tf, ff):
    """Turn right ("sag") for tf/motorhizi seconds at PWM duty ff."""
    init()
    for low_pin in (18, 22):
        gpio.output(low_pin, 0)
    pwm_a = gpio.PWM(27, 50)
    pwm_b = gpio.PWM(17, 50)
    pwm_a.start(ff)
    pwm_b.start(ff)
    time.sleep(float(tf) / motorhizi)
    gpio.cleanup()
def dur():
    """Stop: force all four drive pins low and release the GPIOs."""
    init()
    for drive_pin in (22, 17, 18, 27):
        gpio.output(drive_pin, 0)
    gpio.cleanup()
def adim1(tf, y):
    """Stepper phase 1: energise one coil for tf seconds; y==1 left, y==0 right."""
    init()
    patterns = {1: (1, 0, 0, 0),   # left
                0: (0, 0, 0, 1)}   # right
    if y in patterns:
        for pin, level in zip((in1, in2, in3, in4), patterns[y]):
            gpio.output(pin, level)
    time.sleep(tf)
    gpio.cleanup()
def adim2(tf, y):
    """Stepper phase 2: energise one coil for tf seconds; y==1 left, y==0 right."""
    init()
    patterns = {1: (0, 1, 0, 0),   # left
                0: (0, 0, 1, 0)}   # right
    if y in patterns:
        for pin, level in zip((in1, in2, in3, in4), patterns[y]):
            gpio.output(pin, level)
    time.sleep(tf)
    gpio.cleanup()
def adim3(tf, y):
    """Stepper phase 3: energise one coil for tf seconds; y==1 left, y==0 right."""
    init()
    patterns = {1: (0, 0, 1, 0),   # left
                0: (0, 1, 0, 0)}   # right
    if y in patterns:
        for pin, level in zip((in1, in2, in3, in4), patterns[y]):
            gpio.output(pin, level)
    time.sleep(tf)
    gpio.cleanup()
def adim4(tf, y):
    """Stepper phase 4: energise one coil for tf seconds; y==1 left, y==0 right."""
    init()
    patterns = {1: (0, 0, 0, 1),   # left
                0: (1, 0, 0, 0)}   # right
    if y in patterns:
        for pin, level in zip((in1, in2, in3, in4), patterns[y]):
            gpio.output(pin, level)
    time.sleep(tf)
    gpio.cleanup()
def stepper(tf, ff, yf):
    """Run tf full four-phase stepper cycles.

    ff is the per-phase delay in milliseconds; yf is the direction
    (0 = right, 1 = left). Any other yf does nothing.
    """
    delay = float(ff) / 1000  # milliseconds -> seconds
    if yf in (0, 1):
        for _ in range(tf):
            adim1(delay, yf)
            adim2(delay, yf)
            adim3(delay, yf)
            adim4(delay, yf)
def servo(tf):
    """Move the pan servo (pin 5) to duty cycle tf; 5.5 is centre."""
    gpio.setmode(gpio.BCM)
    gpio.setup(5, gpio.OUT)
    pwm = gpio.PWM(5, 50)
    pwm.start(5.5)
    pwm.ChangeDutyCycle(tf)
    time.sleep(0.7)  # give the horn time to travel
    gpio.cleanup()
def servo2(tf):
    """Move the second servo (pin 6) to duty cycle tf."""
    gpio.setmode(gpio.BCM)
    gpio.setup(6, gpio.OUT)
    pwm = gpio.PWM(6, 50)
    pwm.start(6)
    pwm.ChangeDutyCycle(tf)
    time.sleep(0.7)  # give the horn time to travel
    gpio.cleanup()
def servo3(tf):
    """Move the third servo (pin 20) to duty cycle tf."""
    gpio.setmode(gpio.BCM)
    gpio.setup(20, gpio.OUT)
    pwm = gpio.PWM(20, 50)
    pwm.start(6)
    pwm.ChangeDutyCycle(tf)
    time.sleep(0.7)  # give the horn time to travel
    gpio.cleanup()
def servo4(tf):
    """Move the fourth servo (pin 16) to duty cycle tf."""
    gpio.setmode(gpio.BCM)
    gpio.setup(16, gpio.OUT)
    pwm = gpio.PWM(16, 50)
    pwm.start(6)
    pwm.ChangeDutyCycle(tf)
    time.sleep(0.7)  # give the horn time to travel
    gpio.cleanup()
def ses(tf, ff):
    """Sound the buzzer (pin 21) at ff Hz for tf seconds."""
    init()
    buzzer = gpio.PWM(21, ff)
    buzzer.start(70)  # fixed 70% duty cycle
    time.sleep(tf)
    gpio.cleanup()
def led(ff, tf, sf):
    """Light the LEDs at PWM duty ff for tf seconds.

    sf selects the side: 0 = left, 1 = right, 2 = both.
    Any other sf lights nothing.

    Bug fix: the original only called gpio.cleanup() inside the three
    recognised branches, so an invalid sf left the pins configured by
    init() and never released; cleanup now always runs.
    """
    init()
    left = gpio.PWM(solled, 500)
    right = gpio.PWM(sagled, 500)
    if sf == 0:
        left.start(ff)
    elif sf == 1:
        right.start(ff)
    elif sf == 2:
        left.start(ff)
        right.start(ff)
    if sf in (0, 1, 2):
        time.sleep(tf)
    gpio.cleanup()
def kapat():
    # Shut down: force-kill the main control script (this also ends the
    # current process if it was launched from main.py).
    os.system("pkill -9 -f main.py")
def _evade(first_label, first_angle, second_label, second_angle, inner_turn, outer_turn):
    """Scan one side then the other and steer away from obstacles.

    Servo duty cycles: 3 looks right, 9 looks left, 5.5 is centre.
    inner_turn runs when the second side is clear, outer_turn when the
    first side is clear.
    """
    print(first_label)
    servo(first_angle)
    time.sleep(0.05)
    dis = distance('cm')
    print(dis)
    if dis < 15:
        # First side blocked: check the opposite side.
        print(second_label)
        servo(second_angle)
        dis = distance('cm')
        if dis < 15:
            # Blocked on both sides: centre the servo and back straight out.
            print("cik")
            servo(5.5)
            geri(2, hiz)
        else:
            servo(5.5)
            geri(0.5, hiz)
            inner_turn(0.7, hiz)
    else:
        servo(5.5)
        geri(0.5, hiz)
        outer_turn(0.7, hiz)


def kontrol():
    """Obstacle-avoidance manoeuvre used by the autonomous mode.

    Randomly scans right-then-left or left-then-right, then backs away
    and turns toward the clearer side. The original duplicated the whole
    manoeuvre for both orders; the mirrored logic now lives in _evade.
    """
    x = random.randrange(1, 3)  # 1 or 2
    if x == 1:
        # Look right first ("sagabak"); look left ("solabak") if blocked.
        _evade("sagabak", 3, "solabak", 9, inner_turn=sol, outer_turn=sag)
    if x == 2:
        # Look left first; look right if blocked.
        _evade("solabak", 9, "sagabak", 3, inner_turn=sag, outer_turn=sol)
    print(" ")
    print("otonomgorev yazilimi google speech api sesli komutlari ile robotun otonom hareket etmesi icin yazilmistir")
    print(" ")
    time.sleep(1)
def cizgi(rf):
    # Line-follower loop: run rf iterations, steering by the two digital
    # line sensors (pins 23/24, 0 = on the line) and checking the
    # ultrasonic distance each pass. Left byte-identical because the
    # sensors are re-read across the elif chain; collapsing the reads
    # would change the sampling behaviour.
    for i in range(0,rf):
        dis = distance('cm')
        init()
        if (gpio.input(23) == 0 and gpio.input(24) == 0):
            ileri(0.1,hiz)
        elif (gpio.input(23) == 1 and gpio.input(24) == 0):
            sol(0.1,hiz)
        elif (gpio.input(23) == 0 and gpio.input(24) == 1):
            sag(0.1,hiz)
        else:
            # Both sensors high: line lost; coast (motor helpers already
            # released the pins via gpio.cleanup()).
            pass
        if dis < 15:
            # "cok dar" = very tight: back off and run the avoidance scan.
            print ("cok dar",dis)
            geri(0.5,hiz)
            servo(5.5)
            kontrol()
        elif dis < 25:
            # "dar" = tight: warn only.
            print ("dar",dis)
        else:
            # "temiz" = clear.
            print ("temiz",dis)
    dur()
# NOTE(review): re-assigns the servo-angle module constants to the same
# values they were given at the top of the file -- appears redundant;
# confirm nothing mutates them at runtime before removing.
aci2 = aci3 = aci4 = 6
aci = 5.5
| Robotics/src/otonomgorev.py | 6,901 | sol sag sol sag sol sag sol sag sag sol | 39 | de | 0.211926 |
# Command-line options for the action-recognition training/evaluation
# scripts; importers use `from opts import parser` and call parse_args().
import argparse
parser = argparse.ArgumentParser(description="PyTorch implementation of action recognition models")
parser.add_argument('--dataset', type=str, choices=['somethingv1','somethingv2','diving48'],
                    default = 'somethingv1')
parser.add_argument('--root_path', type = str, default = '../',
                    help = 'root path to video dataset folders')
parser.add_argument('--store_name', type=str, default="")
# ========================= Model Configs ==========================
parser.add_argument('--type', type=str, default="GST",choices=['GST','R3D','S3D', 'I3D'],
                    help = 'type of temporal models, currently support GST,Res3D and S3D')
parser.add_argument('--arch', type=str, default="resnet50",choices=['resnet50','resnet101'],
                    help = 'backbone networks, currently only support resnet')
# Number of frames sampled per clip.
parser.add_argument('--num_segments', type=int, default=8)
parser.add_argument('--alpha', type=int, default=4, help = 'spatial temporal split for output channels')
parser.add_argument('--beta', type=int, default=2, choices=[1,2], help = 'channel splits for input channels, 1 for GST-Large and 2 for GST')
# ========================= Learning Configs ==========================
parser.add_argument('--epochs', default=70, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=24, type=int,
                    metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--lr_steps', default=[50, 60], type=float, nargs="+",
                    metavar='LRSteps', help='epochs to decay learning rate by 10')
parser.add_argument('--dropout', '--dp', default=0.3, type=float,
                    metavar='dp', help='dropout ratio')
parser.add_argument('--warm', default=5, type=float, help='warm up epochs')
#========================= Optimizer Configs ==========================
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight_decay', '--wd', default=3e-4, type=float,
                    metavar='W', help='weight decay (default: 3e-4)')
parser.add_argument('--clip-gradient', '--gd', default=20, type=float,
                    metavar='W', help='gradient norm clipping (default: 20)')
# ========================= Monitor Configs ==========================
parser.add_argument('--print-freq', '-p', default=20, type=int,
                    metavar='N', help='print frequency (default: 20)')
parser.add_argument('--eval-freq', '-ef', default=1, type=int,
                    metavar='N', help='evaluation frequency (default: 1)')
# ========================= Runtime Configs ==========================
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                    help='number of data loading workers (default: 16)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--checkpoint_dir',type=str, required=True,
                    help = 'folder to restore checkpoint and training log')
# ========================= Added by Qihang ==========================
parser.add_argument('--op_code', type=str, default="conv3d", help='op code to use')
parser.add_argument('--sparsity-regularization', '-sr', dest='sr', action='store_true',
                    help='train with channel sparsity regularization')
parser.add_argument('--s', type=float, default=0.0001,
                    help='scale sparse rate (default: 0.0001)')
parser.add_argument('--conv_config', type=str, default='',
                    help='conv config')
parser.add_argument('--search', action='store_true', default=False,
                    help='search mode')
parser.add_argument('--prune', action='store_true', default=False,
                    help='prune after training')
parser.add_argument('--prune_model_path', type=str, default='',
                    help='model to prune')
parser.add_argument('--reweight', action='store_true', default=False,
                    help='reweight the prune factor')
parser.add_argument('--finetune', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
| opts.py | 4,708 | ========================= Model Configs ========================== ========================= Learning Configs =================================================== Optimizer Configs ========================== ========================= Monitor Configs ========================== ========================= Runtime Configs ========================== ========================= Added by Qihang ========================== | 413 | en | 0.466281 |
from typing import List, Tuple, Optional
import aiosqlite
from spare.types.blockchain_format.sized_bytes import bytes32
from spare.util.db_wrapper import DBWrapper
class WalletInterestedStore:
    """
    Stores coin ids and puzzle hashes that the wallet wants to be notified about.

    Backed by two SQLite tables created in :meth:`create`:
      * ``interested_coins(coin_name text PRIMARY KEY)``
      * ``interested_puzzle_hashes(puzzle_hash text PRIMARY KEY, wallet_id integer)``
    Hex strings are used as the stored representation of ``bytes32`` values.
    """

    db_connection: aiosqlite.Connection  # shared connection owned by the wrapper
    db_wrapper: DBWrapper  # serializes writes via its asyncio lock

    @classmethod
    async def create(cls, wrapper: DBWrapper):
        """Build a store on the wrapper's connection, creating tables if needed."""
        self = cls()
        self.db_connection = wrapper.db
        self.db_wrapper = wrapper
        await self.db_connection.execute("pragma journal_mode=wal")
        await self.db_connection.execute("pragma synchronous=2")
        await self.db_connection.execute("CREATE TABLE IF NOT EXISTS interested_coins(coin_name text PRIMARY KEY)")
        await self.db_connection.execute(
            "CREATE TABLE IF NOT EXISTS interested_puzzle_hashes(puzzle_hash text PRIMARY KEY, wallet_id integer)"
        )
        await self.db_connection.commit()
        return self

    async def _clear_database(self):
        """Delete every row from both tables (test helper).

        Bug fix: the original ran ``DELETE FROM puzzle_hashes`` -- a table this
        store never creates -- which would raise an OperationalError; it now
        clears ``interested_puzzle_hashes``.
        """
        cursor = await self.db_connection.execute("DELETE FROM interested_puzzle_hashes")
        await cursor.close()
        cursor = await self.db_connection.execute("DELETE FROM interested_coins")
        await cursor.close()
        await self.db_connection.commit()

    async def get_interested_coin_ids(self) -> List[bytes32]:
        """Return all coin ids we are tracking."""
        cursor = await self.db_connection.execute("SELECT coin_name FROM interested_coins")
        rows_hex = await cursor.fetchall()
        return [bytes32(bytes.fromhex(row[0])) for row in rows_hex]

    async def add_interested_coin_id(self, coin_id: bytes32, in_transaction: bool = False) -> None:
        """Track a coin id; a no-op if it is already tracked (INSERT OR REPLACE)."""
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            cursor = await self.db_connection.execute(
                "INSERT OR REPLACE INTO interested_coins VALUES (?)", (coin_id.hex(),)
            )
            await cursor.close()
        finally:
            # Only commit/unlock when we own the transaction; otherwise the
            # caller holding the lock is responsible for committing.
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()

    async def get_interested_puzzle_hashes(self) -> List[Tuple[bytes32, int]]:
        """Return all (puzzle_hash, wallet_id) pairs we are tracking."""
        cursor = await self.db_connection.execute("SELECT puzzle_hash, wallet_id FROM interested_puzzle_hashes")
        rows_hex = await cursor.fetchall()
        return [(bytes32(bytes.fromhex(row[0])), row[1]) for row in rows_hex]

    async def get_interested_puzzle_hash_wallet_id(self, puzzle_hash: bytes32) -> Optional[int]:
        """Return the wallet id registered for this puzzle hash, or None."""
        cursor = await self.db_connection.execute(
            "SELECT wallet_id FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
        )
        row = await cursor.fetchone()
        if row is None:
            return None
        return row[0]

    async def add_interested_puzzle_hash(
        self, puzzle_hash: bytes32, wallet_id: int, in_transaction: bool = False
    ) -> None:
        """Track a puzzle hash for a wallet, replacing any existing mapping."""
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            cursor = await self.db_connection.execute(
                "INSERT OR REPLACE INTO interested_puzzle_hashes VALUES (?, ?)", (puzzle_hash.hex(), wallet_id)
            )
            await cursor.close()
        finally:
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()

    async def remove_interested_puzzle_hash(self, puzzle_hash: bytes32, in_transaction: bool = False) -> None:
        """Stop tracking a puzzle hash; a no-op if it was not tracked."""
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            cursor = await self.db_connection.execute(
                "DELETE FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
            )
            await cursor.close()
        finally:
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()
| spare/wallet/wallet_interested_store.py | 3,908 | Stores coin ids that we are interested in receiving | 51 | en | 0.986597 |
"""
Client for Yandex.Disk.
"""
__version__ = '0.0.1'
| project_backup/__init__.py | 54 | Client for Yandex.Disk. | 23 | en | 0.408967 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StorageInsightConfigArgs', 'StorageInsightConfig']
@pulumi.input_type
class StorageInsightConfigArgs:
    # NOTE: machine-generated by the Pulumi SDK generator (see the file
    # header); keep edits to comments only so regeneration stays clean.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 storage_account: pulumi.Input['StorageAccountArgs'],
                 workspace_name: pulumi.Input[str],
                 containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 e_tag: Optional[pulumi.Input[str]] = None,
                 storage_insight_name: Optional[pulumi.Input[str]] = None,
                 tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a StorageInsightConfig resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input['StorageAccountArgs'] storage_account: The storage account connection details
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
        :param pulumi.Input[str] e_tag: The ETag of the storage insight.
        :param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "storage_account", storage_account)
        pulumi.set(__self__, "workspace_name", workspace_name)
        # Optional arguments are only recorded when explicitly provided.
        if containers is not None:
            pulumi.set(__self__, "containers", containers)
        if e_tag is not None:
            pulumi.set(__self__, "e_tag", e_tag)
        if storage_insight_name is not None:
            pulumi.set(__self__, "storage_insight_name", storage_insight_name)
        if tables is not None:
            pulumi.set(__self__, "tables", tables)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="storageAccount")
    def storage_account(self) -> pulumi.Input['StorageAccountArgs']:
        """
        The storage account connection details
        """
        return pulumi.get(self, "storage_account")
    @storage_account.setter
    def storage_account(self, value: pulumi.Input['StorageAccountArgs']):
        pulumi.set(self, "storage_account", value)
    @property
    @pulumi.getter(name="workspaceName")
    def workspace_name(self) -> pulumi.Input[str]:
        """
        The name of the workspace.
        """
        return pulumi.get(self, "workspace_name")
    @workspace_name.setter
    def workspace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_name", value)
    @property
    @pulumi.getter
    def containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The names of the blob containers that the workspace should read
        """
        return pulumi.get(self, "containers")
    @containers.setter
    def containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "containers", value)
    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> Optional[pulumi.Input[str]]:
        """
        The ETag of the storage insight.
        """
        return pulumi.get(self, "e_tag")
    @e_tag.setter
    def e_tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "e_tag", value)
    @property
    @pulumi.getter(name="storageInsightName")
    def storage_insight_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the storageInsightsConfigs resource
        """
        return pulumi.get(self, "storage_insight_name")
    @storage_insight_name.setter
    def storage_insight_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_insight_name", value)
    @property
    @pulumi.getter
    def tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The names of the Azure tables that the workspace should read
        """
        return pulumi.get(self, "tables")
    @tables.setter
    def tables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tables", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class StorageInsightConfig(pulumi.CustomResource):
    # NOTE: machine-generated by the Pulumi SDK generator (see the file
    # header); keep edits to comments only so regeneration stays clean.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 e_tag: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]] = None,
                 storage_insight_name: Optional[pulumi.Input[str]] = None,
                 tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        The top level storage insight resource container.
        API Version: 2020-08-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
        :param pulumi.Input[str] e_tag: The ETag of the storage insight.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[pulumi.InputType['StorageAccountArgs']] storage_account: The storage account connection details
        :param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: StorageInsightConfigArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The top level storage insight resource container.
        API Version: 2020-08-01.
        :param str resource_name: The name of the resource.
        :param StorageInsightConfigArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single args
        # object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(StorageInsightConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 e_tag: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]] = None,
                 storage_insight_name: Optional[pulumi.Input[str]] = None,
                 tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
            __props__.__dict__["containers"] = containers
            __props__.__dict__["e_tag"] = e_tag
            # required when creating (opts.urn unset): fail early if missing
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if storage_account is None and not opts.urn:
                raise TypeError("Missing required property 'storage_account'")
            __props__.__dict__["storage_account"] = storage_account
            __props__.__dict__["storage_insight_name"] = storage_insight_name
            __props__.__dict__["tables"] = tables
            __props__.__dict__["tags"] = tags
            if workspace_name is None and not opts.urn:
                raise TypeError("Missing required property 'workspace_name'")
            __props__.__dict__["workspace_name"] = workspace_name
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["name"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["type"] = None
        # Alternate type tokens this resource is also known by (versioned
        # modules and the azure-nextgen provider), so state migrates cleanly.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:operationalinsights:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20150320:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20150320:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20200301preview:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200301preview:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20200801:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200801:StorageInsightConfig")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(StorageInsightConfig, __self__).__init__(
            'azure-native:operationalinsights:StorageInsightConfig',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageInsightConfig':
        """
        Get an existing StorageInsightConfig resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start unset; the engine populates them from the
        # provider state identified by `id`.
        __props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
        __props__.__dict__["containers"] = None
        __props__.__dict__["e_tag"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["status"] = None
        __props__.__dict__["storage_account"] = None
        __props__.__dict__["tables"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return StorageInsightConfig(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def containers(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The names of the blob containers that the workspace should read
        """
        return pulumi.get(self, "containers")
    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> pulumi.Output[Optional[str]]:
        """
        The ETag of the storage insight.
        """
        return pulumi.get(self, "e_tag")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output['outputs.StorageInsightStatusResponse']:
        """
        The status of the storage insight
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="storageAccount")
    def storage_account(self) -> pulumi.Output['outputs.StorageAccountResponse']:
        """
        The storage account connection details
        """
        return pulumi.get(self, "storage_account")
    @property
    @pulumi.getter
    def tables(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The names of the Azure tables that the workspace should read
        """
        return pulumi.get(self, "tables")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
| sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py | 14,960 | The set of arguments for constructing a StorageInsightConfig resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['StorageAccountArgs'] storage_account: The storage account connection details
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
:param pulumi.Input[str] e_tag: The ETag of the storage insight.
:param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
The top level storage insight resource container.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
:param pulumi.Input[str] e_tag: The ETag of the storage insight.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['StorageAccountArgs']] storage_account: The storage account connection details
:param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace.
The top level storage insight resource container.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param StorageInsightConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
The names of the blob containers that the workspace should read
The names of the blob containers that the workspace should read
The ETag of the storage insight.
The ETag of the storage insight.
Get an existing StorageInsightConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
The name of the resource
The name of the resource group. The name is case insensitive.
The status of the storage insight
The storage account connection details
The storage account connection details
Name of the storageInsightsConfigs resource
The names of the Azure tables that the workspace should read
The names of the Azure tables that the workspace should read
Resource tags.
Resource tags.
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
The name of the workspace.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 3,295 | en | 0.627615 |
# This file will consist of some wrapper for using MySQL
# It is mainly used for preparing and calling mysql cli
import logging
from mysql_autoxtrabackup.general_conf import path_config
from mysql_autoxtrabackup.general_conf.generalops import GeneralClass
from mysql_autoxtrabackup.process_runner.process_runner import ProcessRunner
logger = logging.getLogger(__name__)
class MySQLClientHelper:
    """Helper that builds and runs ``mysql`` command line invocations.

    Uses the ``[mysql]`` options from the general configuration to assemble
    the client command (defaults file, credentials, socket or host/port).
    """

    def __init__(self, config: str = path_config.config_path_file):
        """
        :param config: path to the tool configuration file; defaults to the
            package-wide config location.
        """
        self.conf = config
        # Using Composition instead of Inheritance here: we only need the
        # parsed mysql options, not the whole GeneralClass behavior.
        options_obj = GeneralClass(config=self.conf)
        self.mysql_options = options_obj.mysql_options

    def create_mysql_client_command(self, statement: str) -> str:
        """Build the full mysql CLI command executing *statement*.

        :param statement: SQL statement to pass via ``-e "..."``.
        :return: the complete shell command string.

        NOTE(review): the password is passed on the command line (visible in
        the process list) and *statement* is embedded in double quotes
        without escaping — statements containing ``"`` will break the shell
        command. Both behaviors are kept for compatibility.
        """
        opt = self.mysql_options.get
        # Bug fix: the previous implementation re-applied str.format() to a
        # string that already contained substituted option values, so values
        # containing '{' or '}' (e.g. in a password) were corrupted or raised.
        # Each template is now formatted exactly once.
        command = "{} --defaults-file={} -u{} --password={}".format(
            opt("mysql"),
            opt("mycnf"),
            opt("mysql_user"),
            opt("mysql_password"),
        )
        if opt("mysql_socket"):
            command += " --socket={}".format(opt("mysql_socket"))
        else:
            command += " --host={} --port={}".format(
                opt("mysql_host"),
                opt("mysql_port"),
            )
        command += ' -e "{}"'.format(statement)
        return command

    def mysql_run_command(self, statement: str) -> bool:
        """Execute *statement* through the mysql CLI.

        :param statement: SQL statement to run.
        :return: result of :meth:`ProcessRunner.run_command`.
        """
        command = self.create_mysql_client_command(statement=statement)
        return ProcessRunner.run_command(command)
| mysql_autoxtrabackup/utils/mysql_cli.py | 1,778 | This file will consist of some wrapper for using MySQL It is mainly used for preparing and calling mysql cli Using Composition instead of Inheritance here | 154 | en | 0.877482 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
from cirq.contrib.paulistring import (
optimized_circuit,
)
def test_optimize():
    # End-to-end check of the pauli-string optimizer on a small hand-built
    # circuit: the input diagram, unitary equivalence, and the exact
    # optimized diagram are all pinned.
    q0, q1, q2 = cirq.LineQubit.range(3)
    c_orig = cirq.Circuit.from_ops(
        cirq.X(q0) ** 0.5,
        cirq.X(q1),
        cirq.CZ(q1, q2),
        cirq.X(q2) ** 0.125,
        cirq.Z(q1) ** 0.5,
        cirq.Y(q1) ** 0.5,
        cirq.CZ(q0, q1),
        cirq.Z(q1) ** 0.5,
        cirq.CZ(q1, q2),
        cirq.Z(q1) ** 0.5,
        cirq.X(q2) ** 0.875,
        cirq.CZ(q1, q2),
        cirq.X(q2) ** 0.125,
    )
    cirq.testing.assert_has_diagram(c_orig, """
0: ───X^0.5─────────────────────────@───────────────────────────────────
                                    │
1: ───X───────@───S─────────Y^0.5───@───S───@───S─────────@─────────────
              │                             │             │
2: ───────────@───X^0.125───────────────────@───X^0.875───@───X^0.125───
""")

    c_opt = optimized_circuit(c_orig)

    # Optimization must not change the implemented unitary (up to a global
    # phase).
    cirq.testing.assert_allclose_up_to_global_phase(
        c_orig.to_unitary_matrix(),
        c_opt.to_unitary_matrix(),
        atol=1e-7,
    )

    cirq.testing.assert_has_diagram(c_opt, """
0: ───X^0.5────────────@────────────────────────────────────────
                       │
1: ───@───────X^-0.5───@───@────────────────@───Z^-0.5──────────
      │                    │                │
2: ───@────────────────────@───[X]^-0.875───@───[X]^-0.25───Z───
""")
def test_optimize_large_circuit():
    """Optimizing a deliberately non-optimal Toffoli construction keeps its
    unitary and leaves exactly 10 CZ-power gates."""
    qubits = cirq.LineQubit.range(3)
    circuit = cirq.testing.nonoptimal_toffoli_circuit(*qubits)

    optimized = optimized_circuit(circuit)

    # The rewritten circuit must implement the same unitary (up to phase).
    cirq.testing.assert_allclose_up_to_global_phase(
        circuit.to_unitary_matrix(),
        optimized.to_unitary_matrix(),
        atol=1e-7,
    )

    cz_ops = [
        op for op in optimized.all_operations()
        if isinstance(op, cirq.GateOperation)
        and isinstance(op.gate, cirq.CZPowGate)
    ]
    assert len(cz_ops) == 10
def test_repeat_limit():
    """With only a single optimization pass, the unitary is still preserved
    but the circuit is not fully compressed (at least 10 CZ-power gates)."""
    qubits = cirq.LineQubit.range(3)
    circuit = cirq.testing.nonoptimal_toffoli_circuit(*qubits)

    optimized = optimized_circuit(circuit, repeat=1)

    cirq.testing.assert_allclose_up_to_global_phase(
        circuit.to_unitary_matrix(),
        optimized.to_unitary_matrix(),
        atol=1e-7,
    )

    cz_ops = [
        op for op in optimized.all_operations()
        if isinstance(op, cirq.GateOperation)
        and isinstance(op.gate, cirq.CZPowGate)
    ]
    assert len(cz_ops) >= 10
| cirq/contrib/paulistring/optimize_test.py | 3,619 | Copyright 2018 The Cirq Developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 557 | en | 0.866863 |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements 1D cubic Hermite spline interpolation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def interpolate1d(x, values, tangents):
  r"""Perform cubic hermite spline interpolation on a 1D spline.

  The x coordinates of the spline knots are at [0 : 1 : len(values)-1].
  Queries outside of the range of the spline are computed using linear
  extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
  for details, where "x" corresponds to `x`, "p" corresponds to `values`, and
  "m" corresponds to `tangents`.

  Args:
    x: A tensor of any size of single or double precision floats containing the
      set of values to be used for interpolation into the spline.
    values: A vector of single or double precision floats containing the value
      of each knot of the spline being interpolated into. Must be the same
      length as `tangents` and the same type as `x`.
    tangents: A vector of single or double precision floats containing the
      tangent (derivative) of each knot of the spline being interpolated into.
      Must be the same length as `values` and the same type as `x`.

  Returns:
    The result of interpolating along the spline defined by `values`, and
    `tangents`, using `x` as the query values. Will be the same length and type
    as `x`.
  """
  # (Removed dead commented-out tensor-conversion code; callers must pass
  # tensors of a matching floating dtype, enforced by the asserts below.)
  assert torch.is_tensor(x)
  assert torch.is_tensor(values)
  assert torch.is_tensor(tangents)
  float_dtype = x.dtype
  assert values.dtype == float_dtype
  assert tangents.dtype == float_dtype
  assert len(values.shape) == 1
  assert len(tangents.shape) == 1
  assert values.shape[0] == tangents.shape[0]

  # Index of the knot at or below each query. Clamping keeps x_hi in range
  # and makes out-of-range queries use the first/last segment's offset.
  x_lo = torch.floor(torch.clamp(x, 0, values.shape[0] - 2)).to(torch.int64)
  x_hi = x_lo + 1

  # Compute the relative distance between each `x` and the knot below it.
  t = x - x_lo.to(float_dtype)

  # Compute the cubic hermite expansion of `t` (basis functions h00..h11).
  t_sq = t**2
  t_cu = t * t_sq
  h01 = -2. * t_cu + 3. * t_sq
  h00 = 1. - h01
  h11 = t_cu - t_sq
  h10 = h11 - t_sq + t

  # Linearly extrapolate above and below the extents of the spline for all
  # values.
  value_before = tangents[0] * t + values[0]
  value_after = tangents[-1] * (t - 1.) + values[-1]

  # Cubically interpolate between the knots below and above each query point.
  neighbor_values_lo = values[x_lo]
  neighbor_values_hi = values[x_hi]
  neighbor_tangents_lo = tangents[x_lo]
  neighbor_tangents_hi = tangents[x_hi]
  value_mid = (
      neighbor_values_lo * h00 + neighbor_values_hi * h01 +
      neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)

  # Return the interpolated or extrapolated values for each query point,
  # depending on whether or not the query lies within the span of the spline.
  return torch.where(t < 0., value_before,
                     torch.where(t > 1., value_after, value_mid))
| pioneer/robust_loss_pytorch/cubic_spline.py | 3,856 | Perform cubic hermite spline interpolation on a 1D spline.
The x coordinates of the spline knots are at [0 : 1 : len(values)-1].
Queries outside of the range of the spline are computed using linear
extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
for details, where "x" corresponds to `x`, "p" corresponds to `values`, and
"m" corresponds to `tangents`.
Args:
x: A tensor of any size of single or double precision floats containing the
set of values to be used for interpolation into the spline.
values: A vector of single or double precision floats containing the value
of each knot of the spline being interpolated into. Must be the same
length as `tangents` and the same type as `x`.
tangents: A vector of single or double precision floats containing the
tangent (derivative) of each knot of the spline being interpolated into.
Must be the same length as `values` and the same type as `x`.
Returns:
The result of interpolating along the spline defined by `values`, and
`tangents`, using `x` as the query values. Will be the same length and type
as `x`.
Implements 1D cubic Hermite spline interpolation.
coding=utf-8 Copyright 2019 The Google Research Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. if x.dtype == 'float64' or torch.as_tensor(x).dtype == torch.float64: float_dtype = torch.float64 else: float_dtype = torch.float32 x = torch.as_tensor(x, dtype=float_dtype) values = torch.as_tensor(values, dtype=float_dtype) tangents = torch.as_tensor(tangents, dtype=float_dtype) Compute the relative distance between each `x` and the knot below it. Compute the cubic hermite expansion of `t`. Linearly extrapolate above and below the extents of the spline for all values. Cubically interpolate between the knots below and above each query point. Return the interpolated or extrapolated values for each query point, depending on whether or not the query lies within the span of the spline. | 2,438 | en | 0.772752 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteTrainingPipeline
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync]
from google.cloud import aiplatform_v1
def sample_delete_training_pipeline():
    """Delete a training pipeline and block until the operation completes.

    NOTE(review): generated sample — replace ``"name_value"`` with the full
    resource name before running.
    """
    # Create a client
    client = aiplatform_v1.PipelineServiceClient()

    # Initialize request argument(s)
    request = aiplatform_v1.DeleteTrainingPipelineRequest(
        name="name_value",
    )

    # Make the request (returns a long-running operation handle)
    operation = client.delete_training_pipeline(request=request)

    print("Waiting for operation to complete...")

    # Blocks until the server-side delete finishes.
    response = operation.result()

    # Handle the response
    print(response)

# [END aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync]
| samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py | 1,607 | -*- coding: utf-8 -*- Copyright 2022 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Generated code. DO NOT EDIT! Snippet for DeleteTrainingPipeline NOTE: This snippet has been automatically generated for illustrative purposes only. It may require modifications to work in your environment. To install the latest published package dependency, execute the following: python3 -m pip install google-cloud-aiplatform [START aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync] Create a client Initialize request argument(s) Make the request Handle the response [END aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync] | 1,133 | en | 0.833397 |
'''
file.readline() 사용해서 csv 파일 열기
'''
#
# def my_csv_reader(fn:str, header=True) -> list:
# '''
# csv 파일의 데이터 2차원 행렬 형태로 리턴
#
#
# :param fn: 읽을 파일 이름(예: data\\exam.csv)
# :param header: csv파일의 헤더 존재 여부
# :return: csv 파일에서 헤더는 제외한 데이터로 이루어진 2차원 리스트
# '''
#
#
# if __name__ == '__main__':
#
# # 작성한 함수들을 테스트
# pass
def print_data(data: list) -> None:
    '''
    Print the contents of a 2D list, one row per line, values separated
    by spaces, e.g.::

        1 10 20 30 40
        2 11 21 31 41

    :param data: 2D list (list of rows, each row a list of values)
    :return: None
    '''
    # Bug fix: the previous body ignored the ``data`` parameter entirely and
    # dumped the hard-coded file 'data/exam.csv' instead, contradicting this
    # function's documented contract.
    for row in data:
        print(*row)
# def get_sum_mean(data : list, col: int) -> tuple:
# '''
# 주어진 2차원 리스트(data)에서 해당 컬럼(col)의 데이터들의
# 총합(sum)과 평균(mean)을 계산해서 리턴
#
# :param data: 2차원 행렬 형태의 리스트
# :param col: 컬럼 인덱스(0,1,2,...)
# :return: 컬럼 데이터의 합과 평균
# ''' | lec07_file/file07.py | 1,259 | 2차원 리스트의 내용을 출력
1 10 20 30 40
2 11 21 31 41
...
:param data: 2차원 행렬 형태의 리스트
:return: None
file.readline() 사용해서 csv 파일 열기
def my_csv_reader(fn:str, header=True) -> list: ''' csv 파일의 데이터 2차원 행렬 형태로 리턴 :param fn: 읽을 파일 이름(예: data\\exam.csv) :param header: csv파일의 헤더 존재 여부 :return: csv 파일에서 헤더는 제외한 데이터로 이루어진 2차원 리스트 ''' if __name__ == '__main__': 작성한 함수들을 테스트 pass def get_sum_mean(data : list, col: int) -> tuple: ''' 주어진 2차원 리스트(data)에서 해당 컬럼(col)의 데이터들의 총합(sum)과 평균(mean)을 계산해서 리턴 :param data: 2차원 행렬 형태의 리스트 :param col: 컬럼 인덱스(0,1,2,...) :return: 컬럼 데이터의 합과 평균 ''' | 630 | ko | 0.99924 |
from urllib.parse import urlparse
from django.conf import settings
from django.db import models
from django_extensions.db.fields import UUIDField
from pyrabbit.http import HTTPError
from django.contrib.sites.models import Site
from apps.queues import rabbit
class Queue(models.Model):
    """A per-user RabbitMQ virtual host used to route competition jobs."""

    name = models.CharField(max_length=64)
    # Random UUID that doubles as the RabbitMQ vhost name.
    vhost = UUIDField(unique=True)
    is_public = models.BooleanField(default=False)
    # NOTE(review): no on_delete — pre-Django-2.0 style (defaults to CASCADE);
    # keep as-is for compatibility with the project's Django version.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # NOTE(review): null=True has no effect on ManyToManyField; harmless.
    organizers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='organizers',
        blank=True,
        null=True,
        help_text="(Organizers allowed to view this queue when they assign their competition to a queue)"
    )

    def __str__(self):
        return self.name

    @property
    def broker_url(self):
        """AMQP connection URL for this queue's vhost, using the owner's
        RabbitMQ credentials and the current site's domain as broker host."""
        # Removed an unused urlparse(settings.BROKER_URL) call left over from
        # an earlier implementation; the host comes from the Sites framework.
        host = Site.objects.get_current().domain
        return "pyamqp://{}:{}@{}:{}/{}".format(
            self.owner.rabbitmq_username,
            self.owner.rabbitmq_password,
            host,
            settings.RABBITMQ_PORT,
            self.vhost
        )

    def delete(self, using=None):
        """Delete the model row, best-effort removing the RabbitMQ vhost."""
        try:
            rabbit.delete_vhost(self.vhost)
        except HTTPError:
            # Vhost not found (or already removed) on the broker side;
            # still delete the database row.
            pass
        return super(Queue, self).delete(using)
| codalab/apps/queues/models.py | 1,466 | Start with pyamqp://guest:guest@localhost:5672// Get localhost:5672 Vhost not found or something | 96 | en | 0.66541 |
#
# This example is again a graph coloring problem. In this case, however,
# a stronger object oriented approach is adopted to show how Coopy is
# indeed compatible with such practices.
#
import coopy
import random
class Node:
    """A graph vertex whose color is a symbolic integer variable."""

    def __init__(self):
        self._color = coopy.symbolic_int('c')
        self._neighbors = set()

    @property
    def color(self):
        """The (symbolic) color assigned to this node."""
        return self._color

    @property
    def has_valid_connections(self):
        """Constraint expressing that no neighbor shares this node's color."""
        constraints = [self.color != neighbor.color
                       for neighbor in self._neighbors]
        return coopy.all(constraints)

    def direct_edge_towards(self, other):
        """Record a directed edge from this node to *other*."""
        self._neighbors.add(other)

    def __repr__(self):
        return str(self.color)
def construct_k_colored_graph(k, n, p):
    """
    Constructs a k colored graph of n nodes in which a pair
    of nodes shares an edge with probability 0 <= p <= 1.

    Note: this code is for demonstrative purposes only; the
    solution for such a problem will not necessarily exist,
    in which case the concretization process will throw
    an exception.
    """
    # NOTE(review): scope() presumably isolates the symbolic variables and
    # constraints created below — confirm against the coopy documentation.
    with coopy.scope():
        # Instantiate n nodes.
        nodes = [Node() for i in range(n)]

        # Connect nodes with probability p. Each unordered pair is
        # considered once; edges are recorded in both directions.
        for i in range(n-1):
            for j in range(i+1,n):
                a = nodes[i]
                b = nodes[j]
                if random.uniform(0,1) < p:
                    a.direct_edge_towards(b)
                    b.direct_edge_towards(a)

        # Impose restrictions over the nodes: each color in [0, k) and no
        # two adjacent nodes sharing a color.
        for node in nodes:
            coopy.any([node.color == i for i in range(k)]).require()
            node.has_valid_connections.require()

        # Concretize the graph and return it as a list of nodes.
        coopy.concretize()
        return nodes
graph = construct_k_colored_graph(3, 10, 0.2)
print(graph) | examples/example-5.py | 1,799 | Constructs a k colored graph of n nodes in which a pair
of nodes shares an edge with probability 0 <= p <= 1.
Note: this code is for demonstrative purposes only; the
solution for such a problem will not necessarily exist,
in which case the concretization process will throw
an exception.
This example is again a graph coloring problem. In this case, however, a stronger object oriented approach is adopted to show how Coopy is indeed compatible with such practices. Instantiate n nodes. Connect nodes with probability p. Impose restrictions over the nodes. Concretize the graph and return it as a list of nodes. | 614 | en | 0.926247 |
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Dogecoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
from .util import inv_dict, all_subclasses
from . import bitcoin
def read_json(filename, default):
    """Load a JSON file located next to this module.

    :param filename: file name relative to this module's directory.
    :param default: value returned when the file is missing, unreadable,
        or contains invalid JSON.
    :return: the decoded JSON content, or ``default`` on failure.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    try:
        with open(path, 'r') as f:
            r = json.load(f)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`: OSError covers missing/unreadable
        # files; ValueError covers json.JSONDecodeError and decode errors.
        r = default
    return r
GIT_REPO_URL = "https://github.com/spesmilo/electrum"
GIT_REPO_ISSUES_URL = "https://github.com/spesmilo/electrum/issues"
BIP39_WALLET_FORMATS = read_json('bip39_wallet_formats.json', [])
class AbstractNet:
    """Container for one chain's constants; concrete nets override these."""

    NET_NAME: str
    TESTNET: bool
    WIF_PREFIX: int
    ADDRTYPE_P2PKH: int
    ADDRTYPE_P2SH: int
    SEGWIT_HRP: str
    # BOLT11_HRP: str
    GENESIS: str
    BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS: int = 0
    BIP44_COIN_TYPE: int
    LN_REALM_BYTE: int

    @classmethod
    def max_checkpoint(cls) -> int:
        # Each checkpoint entry covers one 2016-block retarget period.
        return max(0, len(cls.CHECKPOINTS) * 2016 - 1)

    @classmethod
    def rev_genesis_bytes(cls) -> bytes:
        # Genesis hash as raw bytes in byte-reversed (wire) order.
        return bytes.fromhex(bitcoin.rev_hex(cls.GENESIS))
class BitcoinMainnet(AbstractNet):
    """Dogecoin mainnet parameters."""

    TESTNET = False
    WIF_PREFIX = 158
    ADDRTYPE_P2PKH = 30
    ADDRTYPE_P2SH = 22
    SEGWIT_HRP = "doge"
    # GENESIS = "000000000062b72c5e2ceb45fbc8587e807c155b0da735e6483dfba2f0a9c770"
    GENESIS = "1a91e3dace36e2be3bf030a65679fe821aa1d6ef92e7c9902eb318182c355691"
    DEFAULT_PORTS = {'t': '50011', 's': '50022'}
    DEFAULT_SERVERS = read_json('servers.json', {})
    # NOTE(review): the empty filename makes read_json always return the
    # default [] — confirm whether a checkpoints file was meant to be bundled.
    CHECKPOINTS = read_json('', [])
    BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS = 200

    #BITCOIN_HEADER_PRIV = "02fac398"
    #BITCOIN_HEADER_PUB = "02facafd"

    XPRV_HEADERS = {
        'standard': 0x02fac398,  # xprv
        # 'p2wpkh-p2sh': 0x02fac398,  # yprv
        # 'p2wsh-p2sh': 0x02fac398,  # Yprv
        # 'p2wpkh': 0x02fac398,  # zprv
        # 'p2wsh': 0x02fac398,  # Zprv
    }
    XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
    XPUB_HEADERS = {
        'standard': 0x02facafd,  # xpub
        # 'p2wpkh-p2sh': 0x02facafd,  # ypub
        # 'p2wsh-p2sh': 0x02facafd,  # Ypub
        # 'p2wpkh': 0x02facafd,  # zpub
        # 'p2wsh': 0x02facafd,  # Zpub
    }
    XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
    # BIP44_COIN_TYPE = 1
    #namecoin
    # BIP44_COIN_TYPE = 7
    # dogecoin
    # NOTE(review): coin type 1 is the registered BIP-44 testnet value;
    # Dogecoin mainnet is conventionally 3 — confirm this is deliberate.
    BIP44_COIN_TYPE = 1
    LN_REALM_BYTE = 0
    LN_DNS_SEEDS = [
        'radioblockchain.info',
        'radiopool.me',
    ]
    AUXPOW_CHAIN_ID = 0x00620004
    AUXPOW_START_HEIGHT = 371337
    NAME_EXPIRATION = 60
class BitcoinTestnet(AbstractNet):
    """Dogecoin testnet parameters."""

    TESTNET = True
    WIF_PREFIX = 239
    ADDRTYPE_P2PKH = 111
    ADDRTYPE_P2SH = 196
    SEGWIT_HRP = "xdoge"
    GENESIS = "00000a2ee9363d21e47bc10d5b1e39d4ae4bd950491790e522f90dad86d2d1eb"
    # GENESIS = "00000007199508e34a9ff81e6ec0c477a4cccff2a4767a8eee39c11db367b008"
    DEFAULT_PORTS = {'t': '51001', 's': '51002'}
    DEFAULT_SERVERS = read_json('servers_testnet.json', {})
    CHECKPOINTS = read_json('checkpoints_testnet.json', [])

    XPRV_HEADERS = {
        'standard': 0x04358394,  # tprv
        # 'p2wpkh-p2sh': 0x044a4e28,  # uprv
        # 'p2wsh-p2sh': 0x024285b5,  # Uprv
        # 'p2wpkh': 0x045f18bc,  # vprv
        # 'p2wsh': 0x02575048,  # Vprv
    }
    XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
    XPUB_HEADERS = {
        'standard': 0x043587cf,  # tpub
        # 'p2wpkh-p2sh': 0x044a5262,  # upub
        # 'p2wsh-p2sh': 0x024289ef,  # Upub
        # 'p2wpkh': 0x045f1cf6,  # vpub
        # 'p2wsh': 0x02575483,  # Vpub
    }
    XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
    BIP44_COIN_TYPE = 3
    LN_REALM_BYTE = 1
    LN_DNS_SEEDS = []
    AUXPOW_CHAIN_ID = 0x0062
    AUXPOW_START_HEIGHT = 200
    NAME_EXPIRATION = 36000
class BitcoinRegtest(BitcoinTestnet):
    """Local regression-test network; inherits testnet defaults."""

    SEGWIT_HRP = "ncrt"
    GENESIS = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
    DEFAULT_SERVERS = read_json('servers_regtest.json', {})
    CHECKPOINTS = []
    LN_DNS_SEEDS = []
    NAME_EXPIRATION = 30
class BitcoinSimnet(BitcoinTestnet):
    """Simulation network parameters; inherits testnet defaults."""

    WIF_PREFIX = 0x64
    ADDRTYPE_P2PKH = 0x3f
    ADDRTYPE_P2SH = 0x7b
    SEGWIT_HRP = "sb"
    GENESIS = "683e86bd5c6d110d91b94b97137ba6bfe02dbbdb8e3dff722a669b5d69d77af6"
    DEFAULT_SERVERS = read_json('servers_regtest.json', {})
    CHECKPOINTS = []
    LN_DNS_SEEDS = []
# All concrete chain-parameter classes defined above.
NETS_LIST = tuple(all_subclasses(AbstractNet))

# don't import net directly, import the module instead (so that net is singleton)
net = BitcoinMainnet


def set_signet():
    # NOTE(review): no BitcoinSignet class is defined in this module, so
    # calling this raises NameError — confirm whether the class is missing
    # or signet support was intentionally dropped.
    global net
    net = BitcoinSignet


def set_simnet():
    global net
    net = BitcoinSimnet


def set_mainnet():
    # Restore the default (mainnet) chain parameters.
    global net
    net = BitcoinMainnet


def set_testnet():
    global net
    net = BitcoinTestnet


def set_regtest():
    global net
    net = BitcoinRegtest
| electrum/constants.py | 5,885 | -*- coding: utf-8 -*- Electrum - lightweight Dogecoin client Copyright (C) 2018 The Electrum developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
BOLT11_HRP: str GENESIS = "000000000062b72c5e2ceb45fbc8587e807c155b0da735e6483dfba2f0a9c770"BITCOIN_HEADER_PRIV = "02fac398"BITCOIN_HEADER_PUB = "02facafd" xprv 'p2wpkh-p2sh': 0x02fac398, yprv 'p2wsh-p2sh': 0x02fac398, Yprv 'p2wpkh': 0x02fac398, zprv 'p2wsh': 0x02fac398, Zprv xpub 'p2wpkh-p2sh': 0x02facafd, ypub 'p2wsh-p2sh': 0x02facafd, Ypub 'p2wpkh': 0x02facafd, zpub 'p2wsh': 0x02facafd, Zpub BIP44_COIN_TYPE = 1namecoin BIP44_COIN_TYPE = 7 dogecoin GENESIS = "00000007199508e34a9ff81e6ec0c477a4cccff2a4767a8eee39c11db367b008" tprv 'p2wpkh-p2sh': 0x044a4e28, uprv 'p2wsh-p2sh': 0x024285b5, Uprv 'p2wpkh': 0x045f18bc, vprv 'p2wsh': 0x02575048, Vprv tpub 'p2wpkh-p2sh': 0x044a5262, upub 'p2wsh-p2sh': 0x024289ef, Upub 'p2wpkh': 0x045f1cf6, vpub 'p2wsh': 0x02575483, Vpub don't import net directly, import the module instead (so that net is singleton) | 2,187 | en | 0.717102 |
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`socket` --- Low-level socket library
==========================================
"""
# Stdlib
import logging
import os
import selectors
import struct
from abc import abstractmethod
from errno import EHOSTUNREACH, ENETUNREACH
from socket import (
AF_INET,
AF_INET6,
AF_UNIX,
MSG_DONTWAIT,
SOCK_DGRAM,
SOCK_STREAM,
SOL_SOCKET,
SO_REUSEADDR,
socket,
)
# External
from external import ipaddress
# SCION
from lib.defines import SCION_BUFLEN
from lib.dispatcher import reg_dispatcher
from lib.errors import SCIONIOError
from lib.packet.host_addr import haddr_get_type, haddr_parse_interface
from lib.packet.scmp.errors import SCMPUnreachHost, SCMPUnreachNet
from lib.util import recv_all
from lib.thread import kill_self
from lib.types import AddrType
class Socket(object):
    """
    Base class for socket wrappers.

    Subclasses are expected to create ``self.sock`` (an OS-level socket)
    and implement :meth:`bind`, :meth:`send` and :meth:`recv`.
    """
    # NOTE(review): @abstractmethod is inert without an ABCMeta metaclass —
    # instantiation is not blocked; the methods simply raise when called.
    @abstractmethod
    def bind(self, addr, *args, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def send(self, data, dst=None):
        raise NotImplementedError

    @abstractmethod
    def recv(self, block=True):
        raise NotImplementedError

    def close(self):  # pragma: no cover
        """
        Close the socket.
        """
        self.sock.close()

    def settimeout(self, timeout):  # pragma: no cover
        # Set a new timeout and return the previous one so callers can
        # restore it afterwards.
        prev = self.sock.gettimeout()
        self.sock.settimeout(timeout)
        return prev

    def is_active(self):
        # Subclasses may track liveness; the base socket is always active.
        return True
class UDPSocket(Socket):
    """
    Thin wrapper around BSD/POSIX UDP sockets.
    """
    def __init__(self, bind=None, addr_type=AddrType.IPV6, reuse=False):
        """
        Initialize a UDP socket, then call superclass init for socket options
        and binding.

        :param tuple bind:
            Optional tuple of (`str`, `int`, `str`) describing respectively the
            address and port to bind to, and an optional description.
        :param addr_type:
            Socket domain. Must be one of :const:`~lib.types.AddrType.IPV4`,
            :const:`~lib.types.AddrType.IPV6` (default).
        :param reuse:
            Boolean value indicating whether SO_REUSEADDR option should be set.
        """
        assert addr_type in (AddrType.IPV4, AddrType.IPV6)
        self._addr_type = addr_type
        # Default to an IPv6 socket; use IPv4 only when explicitly requested.
        af_domain = AF_INET6
        if self._addr_type == AddrType.IPV4:
            af_domain = AF_INET
        self.sock = socket(af_domain, SOCK_DGRAM)
        if reuse:
            self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        # Filled in by bind() with the actual (possibly OS-assigned) port.
        self.port = None
        if bind:
            self.bind(*bind)
        self.active = True

    def bind(self, addr, port=0, desc=None):
        """
        Bind socket to the specified address & port. If `addr` is ``None``, the
        socket will bind to all interfaces.

        :param str addr: Address to bind to (can be ``None``, see above).
        :param int port: Port to bind to (0 lets the OS pick a free port).
        :param str desc: Optional purpose of the port.
        """
        if addr is None:
            # Wildcard address: "::" for IPv6, "" for IPv4.
            addr = "::"
            if self._addr_type == AddrType.IPV4:
                addr = ""
        try:
            self.sock.bind((addr, port))
        except OSError as e:
            # Failure to bind is treated as fatal for the whole process.
            logging.critical("Error binding to [%s]:%s: %s", addr, port, e)
            kill_self()
        self.port = self.sock.getsockname()[1]
        if desc:
            logging.debug("%s bound to %s:%d", desc, addr, self.port)

    def send(self, data, dst=None):
        """
        Send data to a specified destination.

        :param bytes data: Data to send.
        :param tuple dst:
            Tuple of (`str`, `int`) describing the destination address and port,
            respectively.
        :returns: True on full send, False otherwise.
        :raises SCMPUnreachNet/SCMPUnreachHost: on network/host-unreachable
            OS errors, so callers can generate SCMP error replies.
        """
        try:
            ret = self.sock.sendto(data, dst)
        except OSError as e:
            errno = e.args[0]
            logging.error("Error sending %dB to %s: %s", len(data), dst, e)
            if errno == ENETUNREACH:
                raise SCMPUnreachNet(dst)
            elif errno == EHOSTUNREACH:
                raise SCMPUnreachHost(dst)
            return False
        if ret != len(data):
            # UDP sendto should be all-or-nothing; a short send is an error.
            logging.error("Wanted to send %dB, only sent %dB", len(data), ret)
            return False
        return True

    def recv(self, block=True):
        """
        Read data from socket.

        :param bool block: If False, use MSG_DONTWAIT for a non-blocking read.
        :returns:
            Tuple of (`bytes`, (`str`, `int`) containing the data, and remote
            host/port respectively.
        """
        flags = 0
        if not block:
            flags = MSG_DONTWAIT
        while True:
            try:
                return self.sock.recvfrom(SCION_BUFLEN, flags)
            except InterruptedError:
                # Retry reads interrupted by signals (EINTR).
                pass
class ReliableSocket(Socket):
"""
Wrapper around Unix socket with message framing functionality baked in
"""
COOKIE = bytes.fromhex("de00ad01be02ef03")
COOKIE_LEN = len(COOKIE)
def __init__(self, reg=None, bind_ip=(), bind_unix=None, sock=None):
"""
Initialise a socket of the specified type, and optionally bind it to an
address/port.
:param tuple reg:
Optional tuple of (`SCIONAddr`, `int`, `SVCType`, `bool`)
describing respectively the address, port, SVC type, and init value
to register with the dispatcher. In sockets that do not connect to
the dispatcher, this argument is None.
:param tuple bind_ip:
Optional tuple of (`SCIONAddr`, `int`) describing the address and port
of the bind address. Only needed if the bind address is different from
the public address.
:param tuple bind_unix:
Optional tuple of (`str`, `str`) describing path to bind to, and an
optional description.
:param sock:
Optional socket file object to build instance around.
"""
self.sock = sock or socket(AF_UNIX, SOCK_STREAM)
self.addr = None
if reg:
addr, port, init, svc = reg
self.registered = reg_dispatcher(
self, addr, port, bind_ip, init, svc)
if bind_unix:
self.bind(*bind_unix)
self.active = True
@classmethod
def from_socket(cls, sock):
return cls(None, sock=sock)
def bind(self, addr, desc=None):
self.addr = addr
# Use 0666 for socket permissions
old_mask = os.umask(0o111)
try:
self.sock.bind(addr)
except OSError as e:
logging.critical("Error binding to %s: %s", addr, e)
kill_self()
os.umask(old_mask)
self.sock.listen(5)
if desc:
logging.debug("%s bound to %s", desc, addr)
def accept(self, block=True):
prev = self.sock.gettimeout()
if not block:
self.sock.settimeout(0)
try:
s = self.sock.accept()[0]
except OSError as e:
logging.error("error accepting socket: %s", e)
return None
finally:
self.sock.settimeout(prev)
return ReliableSocket.from_socket(s)
def connect(self, addr):
self.sock.connect(addr)
def send(self, data, dst=None):
"""
Send data through the socket.
:param bytes data: Data to send.
"""
if dst:
dst_addr, dst_port = dst
if isinstance(dst_addr, str):
dst_addr = haddr_parse_interface(dst_addr)
addr_type = struct.pack("B", dst_addr.TYPE)
packed_dst = dst_addr.pack() + struct.pack("!H", dst_port)
else:
addr_type = struct.pack("B", AddrType.NONE)
packed_dst = b""
data_len = struct.pack("!I", len(data))
data = b"".join([self.COOKIE, addr_type, data_len, packed_dst, data])
try:
self.sock.sendall(data)
return True
except OSError as e:
logging.error("error in send: %s", e)
return False
def recv(self, block=True):
"""
Read data from socket.
:returns: bytestring containing received data.
"""
flags = 0
if not block:
flags = MSG_DONTWAIT
buf = recv_all(self.sock, self.COOKIE_LEN + 5, flags)
if not buf:
return None, None
cookie, addr_type, packet_len = struct.unpack("!8sBI", buf)
if cookie != self.COOKIE:
raise SCIONIOError("Dispatcher socket out of sync")
port_len = 0
if addr_type != AddrType.NONE:
port_len = 2
addr_len = haddr_get_type(addr_type).LEN
# We know there is data coming, block here to avoid sync problems.
buf = recv_all(self.sock, addr_len + port_len + packet_len, 0)
if addr_len > 0:
addr = buf[:addr_len]
port = struct.unpack("!H", buf[addr_len:addr_len + port_len])
sender = (str(ipaddress.ip_address(addr)), port)
else:
addr = ""
port = 0
sender = (None, None)
packet = buf[addr_len + port_len:]
return packet, sender
def close(self):
super().close()
if not self.addr:
return
try:
os.unlink(self.addr)
except OSError as e:
logging.critical("Error unlinking unix socket: %s", e)
kill_self()
class SocketMgr(object):
"""
:class:`Socket` manager.
"""
def __init__(self): # pragma: no cover
self._sel = selectors.DefaultSelector()
def add(self, sock, callback): # pragma: no cover
"""
Add new socket.
:param UDPSocket sock: UDPSocket to add.
"""
if not sock.is_active():
return
self._sel.register(sock.sock, selectors.EVENT_READ, (sock, callback))
def remove(self, sock): # pragma: no cover
"""
Remove socket.
:param UDPSocket sock: UDPSocket to remove.
"""
self._sel.unregister(sock.sock)
def select_(self, timeout=None):
"""
Return the set of UDPSockets that have data pending.
:param float timeout:
Number of seconds to wait for at least one UDPSocket to become
ready. ``None`` means wait forever.
"""
for key, _ in self._sel.select(timeout=timeout):
yield key.data
def close(self):
"""
Close all sockets.
"""
mapping = self._sel.get_map()
if mapping:
for entry in list(mapping.values()):
sock = entry.data[0]
self.remove(sock)
sock.close()
self._sel.close()
| python/lib/socket.py | 11,246 | Wrapper around Unix socket with message framing functionality baked in
Base class for socket wrappers
:class:`Socket` manager.
Thin wrapper around BSD/POSIX UDP sockets.
Initialize a UDP socket, then call superclass init for socket options
and binding.
:param tuple bind:
Optional tuple of (`str`, `int`, `str`) describing respectively the
address and port to bind to, and an optional description.
:param addr_type:
Socket domain. Must be one of :const:`~lib.types.AddrType.IPV4`,
:const:`~lib.types.AddrType.IPV6` (default).
:param reuse:
Boolean value indicating whether SO_REUSEADDR option should be set.
Initialise a socket of the specified type, and optionally bind it to an
address/port.
:param tuple reg:
Optional tuple of (`SCIONAddr`, `int`, `SVCType`, `bool`)
describing respectively the address, port, SVC type, and init value
to register with the dispatcher. In sockets that do not connect to
the dispatcher, this argument is None.
:param tuple bind_ip:
Optional tuple of (`SCIONAddr`, `int`) describing the address and port
of the bind address. Only needed if the bind address is different from
the public address.
:param tuple bind_unix:
Optional tuple of (`str`, `str`) describing path to bind to, and an
optional description.
:param sock:
Optional socket file object to build instance around.
Add new socket.
:param UDPSocket sock: UDPSocket to add.
Bind socket to the specified address & port. If `addr` is ``None``, the
socket will bind to all interfaces.
:param str addr: Address to bind to (can be ``None``, see above).
:param int port: Port to bind to.
:param str desc: Optional purpose of the port.
Close the socket.
Close all sockets.
Read data from socket.
:returns:
Tuple of (`bytes`, (`str`, `int`) containing the data, and remote
host/port respectively.
Read data from socket.
:returns: bytestring containing received data.
Remove socket.
:param UDPSocket sock: UDPSocket to remove.
Return the set of UDPSockets that have data pending.
:param float timeout:
Number of seconds to wait for at least one UDPSocket to become
ready. ``None`` means wait forever.
Send data to a specified destination.
:param bytes data: Data to send.
:param tuple dst:
Tuple of (`str`, `int`) describing the destination address and port,
respectively.
Send data through the socket.
:param bytes data: Data to send.
:mod:`socket` --- Low-level socket library
==========================================
Copyright 2015 ETH Zurich Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Stdlib External SCION pragma: no cover pragma: no cover Use 0666 for socket permissions We know there is data coming, block here to avoid sync problems. pragma: no cover pragma: no cover pragma: no cover | 3,250 | en | 0.690426 |
#!/bin/python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Auth, Mail, PluginManager and various utilities
------------------------------------------------
"""
import base64
try:
import cPickle as pickle
except:
import pickle
import datetime
import thread
import logging
import sys
import glob
import os
import re
import time
import traceback
import smtplib
import urllib
import urllib2
import Cookie
import cStringIO
import ConfigParser
import email.utils
import random
from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string, Charset
from gluon.contenttype import contenttype
from gluon.storage import Storage, StorageList, Settings, Messages
from gluon.utils import web2py_uuid
from gluon.fileutils import read_file, check_credentials
from gluon import *
from gluon.contrib.autolinks import expand_one
from gluon.contrib.markmin.markmin2html import \
replace_at_urls, replace_autolinks, replace_components
from pydal.objects import Row, Set, Query
import gluon.serializers as serializers
Table = DAL.Table
Field = DAL.Field
try:
# try stdlib (Python 2.6)
import json as json_parser
except ImportError:
try:
# try external module
import simplejson as json_parser
except:
# fallback to pure-Python module
import gluon.contrib.simplejson as json_parser
# Public API of this module.
__all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service', 'Wiki',
           'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate']
### mind there are two loggers here (logger and crud.settings.logger)!
logger = logging.getLogger("web2py")
# Unique sentinel object used to detect "argument not supplied";
# a lambda so it is distinct from None and every ordinary value.
DEFAULT = lambda: None
def getarg(position, default=None):
    """Return ``current.request.args[position]`` or *default*.

    Negative positions index from the end, mirroring list semantics;
    out-of-range positions yield *default* instead of raising.
    """
    args = current.request.args
    # Valid list indices (positive or negative) lie in [-len, len).
    if -len(args) <= position < len(args):
        return args[position]
    return default
def callback(actions, form, tablename=None):
    """Invoke every callback in *actions*, passing it *form*.

    *actions* may be a single callable, a list/tuple of callables, or
    (when *tablename* is given) a dict mapping table names to either of
    the above.  Falsy *actions* is a no-op.
    """
    if not actions:
        return
    if tablename and isinstance(actions, dict):
        actions = actions.get(tablename, [])
    if not isinstance(actions, (list, tuple)):
        actions = [actions]
    for action in actions:
        action(form)
def validators(*a):
    """Flatten the arguments one level into a single list of validators.

    Each argument that is a list or tuple is spliced in; anything else
    is appended as-is.
    """
    flat = []
    for item in a:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
def call_or_redirect(f, *args):
    """Redirect to ``f(*args)`` when *f* is callable, otherwise to *f* itself."""
    target = f(*args) if callable(f) else f
    redirect(target)
def replace_id(url, form):
    """Substitute the ``[id]`` placeholder in *url* with the form's record id.

    Absolute URLs (leading ``/`` or ``http``) are returned directly after
    substitution; anything else is routed through ``URL()``.
    """
    if url:
        url = url.replace('[id]', str(form.vars.id))
        # Already absolute: hand back verbatim.
        if url[0] == '/' or url[:4] == 'http':
            return url
    return URL(url)
class Mail(object):
"""
Class for configuring and sending emails with alternative text / html
body, multiple attachments and encryption support
Works with SMTP and Google App Engine.
Args:
server: SMTP server address in address:port notation
sender: sender email address
login: sender login name and password in login:password notation
or None if no authentication is required
tls: enables/disables encryption (True by default)
In Google App Engine use ::
server='gae'
For sake of backward compatibility all fields are optional and default
to None, however, to be able to send emails at least server and sender
must be specified. They are available under following fields::
mail.settings.server
mail.settings.sender
mail.settings.login
mail.settings.timeout = 60 # seconds (default)
When server is 'logging', email is logged but not sent (debug mode)
Optionally you can use PGP encryption or X509::
mail.settings.cipher_type = None
mail.settings.gpg_home = None
mail.settings.sign = True
mail.settings.sign_passphrase = None
mail.settings.encrypt = True
mail.settings.x509_sign_keyfile = None
mail.settings.x509_sign_certfile = None
mail.settings.x509_sign_chainfile = None
mail.settings.x509_nocerts = False
mail.settings.x509_crypt_certfiles = None
cipher_type : None
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults
to True
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name /
string or a list of file names /
strings (PEM format)
Examples:
Create Mail object with authentication data for remote server::
mail = Mail('example.com:25', 'me@example.com', 'me:password')
Notice for GAE users:
attachments have an automatic content_id='attachment-i' where i is progressive number
in this way the can be referenced from the HTML as <img src="cid:attachment-0" /> etc.
"""
class Attachment(MIMEBase.MIMEBase):
"""
Email attachment
Args:
payload: path to file or file-like object with read() method
filename: name of the attachment stored in message; if set to
None, it will be fetched from payload path; file-like
object payload must have explicit filename specified
content_id: id of the attachment; automatically contained within
`<` and `>`
content_type: content type of the attachment; if set to None,
it will be fetched from filename using gluon.contenttype
module
encoding: encoding of all strings passed to this function (except
attachment body)
Content ID is used to identify attachments within the html body;
in example, attached image with content ID 'photo' may be used in
html message as a source of img tag `<img src="cid:photo" />`.
Example::
Create attachment from text file::
attachment = Mail.Attachment('/path/to/file.txt')
Content-Type: text/plain
MIME-Version: 1.0
Content-Disposition: attachment; filename="file.txt"
Content-Transfer-Encoding: base64
SOMEBASE64CONTENT=
Create attachment from image file with custom filename and cid::
attachment = Mail.Attachment('/path/to/file.png',
filename='photo.png',
content_id='photo')
Content-Type: image/png
MIME-Version: 1.0
Content-Disposition: attachment; filename="photo.png"
Content-Id: <photo>
Content-Transfer-Encoding: base64
SOMEOTHERBASE64CONTENT=
"""
def __init__(
self,
payload,
filename=None,
content_id=None,
content_type=None,
encoding='utf-8'):
if isinstance(payload, str):
if filename is None:
filename = os.path.basename(payload)
payload = read_file(payload, 'rb')
else:
if filename is None:
raise Exception('Missing attachment name')
payload = payload.read()
filename = filename.encode(encoding)
if content_type is None:
content_type = contenttype(filename)
self.my_filename = filename
self.my_payload = payload
MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1))
self.set_payload(payload)
self['Content-Disposition'] = 'attachment; filename="%s"' % filename
if not content_id is None:
self['Content-Id'] = '<%s>' % content_id.encode(encoding)
Encoders.encode_base64(self)
def __init__(self, server=None, sender=None, login=None, tls=True):
settings = self.settings = Settings()
settings.server = server
settings.sender = sender
settings.login = login
settings.tls = tls
settings.timeout = 60 # seconds
settings.hostname = None
settings.ssl = False
settings.cipher_type = None
settings.gpg_home = None
settings.sign = True
settings.sign_passphrase = None
settings.encrypt = True
settings.x509_sign_keyfile = None
settings.x509_sign_certfile = None
settings.x509_sign_chainfile = None
settings.x509_nocerts = False
settings.x509_crypt_certfiles = None
settings.debug = False
settings.lock_keys = True
self.result = {}
self.error = None
def send(self,
to,
subject='[no subject]',
message='[no message]',
attachments=None,
cc=None,
bcc=None,
reply_to=None,
sender=None,
encoding='utf-8',
raw=False,
headers={},
from_address=None,
cipher_type=None,
sign=None,
sign_passphrase=None,
encrypt=None,
x509_sign_keyfile=None,
x509_sign_chainfile=None,
x509_sign_certfile=None,
x509_crypt_certfiles=None,
x509_nocerts=None
):
"""
Sends an email using data specified in constructor
Args:
to: list or tuple of receiver addresses; will also accept single
object
subject: subject of the email
message: email body text; depends on type of passed object:
- if 2-list or 2-tuple is passed: first element will be
source of plain text while second of html text;
- otherwise: object will be the only source of plain text
and html source will be set to None
If text or html source is:
- None: content part will be ignored,
- string: content part will be set to it,
- file-like object: content part will be fetched from it using
it's read() method
attachments: list or tuple of Mail.Attachment objects; will also
accept single object
cc: list or tuple of carbon copy receiver addresses; will also
accept single object
bcc: list or tuple of blind carbon copy receiver addresses; will
also accept single object
reply_to: address to which reply should be composed
encoding: encoding of all strings passed to this method (including
message bodies)
headers: dictionary of headers to refine the headers just before
sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}`
from_address: address to appear in the 'From:' header, this is not
the envelope sender. If not specified the sender will be used
cipher_type :
gpg - need a python-pyme package and gpgme lib
x509 - smime
gpg_home : you can set a GNUPGHOME environment variable
to specify home of gnupg
sign : sign the message (True or False)
sign_passphrase : passphrase for key signing
encrypt : encrypt the message (True or False). It defaults to True.
... x509 only ...
x509_sign_keyfile : the signers private key filename or
string containing the key. (PEM format)
x509_sign_certfile: the signers certificate filename or
string containing the cert. (PEM format)
x509_sign_chainfile: sets the optional all-in-one file where you
can assemble the certificates of Certification
Authorities (CA) which form the certificate
chain of email certificate. It can be a
string containing the certs to. (PEM format)
x509_nocerts : if True then no attached certificate in mail
x509_crypt_certfiles: the certificates file or strings to encrypt
the messages with can be a file name / string or
a list of file names / strings (PEM format)
Examples:
Send plain text message to single address::
mail.send('you@example.com',
'Message subject',
'Plain text body of the message')
Send html message to single address::
mail.send('you@example.com',
'Message subject',
'<html>Plain text body of the message</html>')
Send text and html message to three addresses (two in cc)::
mail.send('you@example.com',
'Message subject',
('Plain text body', '<html>html body</html>'),
cc=['other1@example.com', 'other2@example.com'])
Send html only message with image attachment available from the
message by 'photo' content id::
mail.send('you@example.com',
'Message subject',
(None, '<html><img src="cid:photo" /></html>'),
Mail.Attachment('/path/to/photo.jpg'
content_id='photo'))
Send email with two attachments and no body text::
mail.send('you@example.com,
'Message subject',
None,
[Mail.Attachment('/path/to/fist.file'),
Mail.Attachment('/path/to/second.file')])
Returns:
True on success, False on failure.
Before return, method updates two object's fields:
- self.result: return value of smtplib.SMTP.sendmail() or GAE's
mail.send_mail() method
- self.error: Exception message or None if above was successful
"""
# We don't want to use base64 encoding for unicode mail
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
def encode_header(key):
if [c for c in key if 32 > ord(c) or ord(c) > 127]:
return Header.Header(key.encode('utf-8'), 'utf-8')
else:
return key
# encoded or raw text
def encoded_or_raw(text):
if raw:
text = encode_header(text)
return text
sender = sender or self.settings.sender
if not isinstance(self.settings.server, str):
raise Exception('Server address not specified')
if not isinstance(sender, str):
raise Exception('Sender address not specified')
if not raw and attachments:
# Use multipart/mixed if there is attachments
payload_in = MIMEMultipart.MIMEMultipart('mixed')
elif raw:
# no encoding configuration for raw messages
if not isinstance(message, basestring):
message = message.read()
if isinstance(message, unicode):
text = message.encode('utf-8')
elif not encoding == 'utf-8':
text = message.decode(encoding).encode('utf-8')
else:
text = message
# No charset passed to avoid transport encoding
# NOTE: some unicode encoded strings will produce
# unreadable mail contents.
payload_in = MIMEText.MIMEText(text)
if to:
if not isinstance(to, (list, tuple)):
to = [to]
else:
raise Exception('Target receiver address not specified')
if cc:
if not isinstance(cc, (list, tuple)):
cc = [cc]
if bcc:
if not isinstance(bcc, (list, tuple)):
bcc = [bcc]
if message is None:
text = html = None
elif isinstance(message, (list, tuple)):
text, html = message
elif message.strip().startswith('<html') and \
message.strip().endswith('</html>'):
text = self.settings.server == 'gae' and message or None
html = message
else:
text = message
html = None
if (not text is None or not html is None) and (not raw):
if not text is None:
if not isinstance(text, basestring):
text = text.read()
if isinstance(text, unicode):
text = text.encode('utf-8')
elif not encoding == 'utf-8':
text = text.decode(encoding).encode('utf-8')
if not html is None:
if not isinstance(html, basestring):
html = html.read()
if isinstance(html, unicode):
html = html.encode('utf-8')
elif not encoding == 'utf-8':
html = html.decode(encoding).encode('utf-8')
# Construct mime part only if needed
if text is not None and html:
# We have text and html we need multipart/alternative
attachment = MIMEMultipart.MIMEMultipart('alternative')
attachment.attach(MIMEText.MIMEText(text, _charset='utf-8'))
attachment.attach(
MIMEText.MIMEText(html, 'html', _charset='utf-8'))
elif text is not None:
attachment = MIMEText.MIMEText(text, _charset='utf-8')
elif html:
attachment = \
MIMEText.MIMEText(html, 'html', _charset='utf-8')
if attachments:
# If there is attachments put text and html into
# multipart/mixed
payload_in.attach(attachment)
else:
# No attachments no multipart/mixed
payload_in = attachment
if (attachments is None) or raw:
pass
elif isinstance(attachments, (list, tuple)):
for attachment in attachments:
payload_in.attach(attachment)
else:
payload_in.attach(attachments)
#######################################################
# CIPHER #
#######################################################
cipher_type = cipher_type or self.settings.cipher_type
sign = sign if sign != None else self.settings.sign
sign_passphrase = sign_passphrase or self.settings.sign_passphrase
encrypt = encrypt if encrypt != None else self.settings.encrypt
#######################################################
# GPGME #
#######################################################
if cipher_type == 'gpg':
if self.settings.gpg_home:
# Set GNUPGHOME environment variable to set home of gnupg
import os
os.environ['GNUPGHOME'] = self.settings.gpg_home
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to gpg"
return False
# need a python-pyme package and gpgme lib
from pyme import core, errors
from pyme.constants.sig import mode
############################################
# sign #
############################################
if sign:
import string
core.check_version(None)
pin = string.replace(payload_in.as_string(), '\n', '\r\n')
plain = core.Data(pin)
sig = core.Data()
c = core.Context()
c.set_armor(1)
c.signers_clear()
# search for signing key for From:
for sigkey in c.op_keylist_all(sender, 1):
if sigkey.can_sign:
c.signers_add(sigkey)
if not c.signers_enum(0):
self.error = 'No key for signing [%s]' % sender
return False
c.set_passphrase_cb(lambda x, y, z: sign_passphrase)
try:
# make a signature
c.op_sign(plain, sig, mode.DETACH)
sig.seek(0, 0)
# make it part of the email
payload = MIMEMultipart.MIMEMultipart('signed',
boundary=None,
_subparts=None,
**dict(
micalg="pgp-sha1",
protocol="application/pgp-signature"))
# insert the origin payload
payload.attach(payload_in)
# insert the detached signature
p = MIMEBase.MIMEBase("application", 'pgp-signature')
p.set_payload(sig.read())
payload.attach(p)
# it's just a trick to handle the no encryption case
payload_in = payload
except errors.GPGMEError, ex:
self.error = "GPG error: %s" % ex.getstring()
return False
############################################
# encrypt #
############################################
if encrypt:
core.check_version(None)
plain = core.Data(payload_in.as_string())
cipher = core.Data()
c = core.Context()
c.set_armor(1)
# collect the public keys for encryption
recipients = []
rec = to[:]
if cc:
rec.extend(cc)
if bcc:
rec.extend(bcc)
for addr in rec:
c.op_keylist_start(addr, 0)
r = c.op_keylist_next()
if r is None:
self.error = 'No key for [%s]' % addr
return False
recipients.append(r)
try:
# make the encryption
c.op_encrypt(recipients, 1, plain, cipher)
cipher.seek(0, 0)
# make it a part of the email
payload = MIMEMultipart.MIMEMultipart('encrypted',
boundary=None,
_subparts=None,
**dict(protocol="application/pgp-encrypted"))
p = MIMEBase.MIMEBase("application", 'pgp-encrypted')
p.set_payload("Version: 1\r\n")
payload.attach(p)
p = MIMEBase.MIMEBase("application", 'octet-stream')
p.set_payload(cipher.read())
payload.attach(p)
except errors.GPGMEError, ex:
self.error = "GPG error: %s" % ex.getstring()
return False
#######################################################
# X.509 #
#######################################################
elif cipher_type == 'x509':
if not sign and not encrypt:
self.error = "No sign and no encrypt is set but cipher type to x509"
return False
import os
x509_sign_keyfile = x509_sign_keyfile or\
self.settings.x509_sign_keyfile
x509_sign_chainfile = x509_sign_chainfile or\
self.settings.x509_sign_chainfile
x509_sign_certfile = x509_sign_certfile or\
self.settings.x509_sign_certfile or\
x509_sign_keyfile or\
self.settings.x509_sign_certfile
# crypt certfiles could be a string or a list
x509_crypt_certfiles = x509_crypt_certfiles or\
self.settings.x509_crypt_certfiles
x509_nocerts = x509_nocerts or\
self.settings.x509_nocerts
# need m2crypto
try:
from M2Crypto import BIO, SMIME, X509
except Exception, e:
self.error = "Can't load M2Crypto module"
return False
msg_bio = BIO.MemoryBuffer(payload_in.as_string())
s = SMIME.SMIME()
# SIGN
if sign:
# key for signing
try:
keyfile_bio = BIO.openfile(x509_sign_keyfile)\
if os.path.isfile(x509_sign_keyfile)\
else BIO.MemoryBuffer(x509_sign_keyfile)
sign_certfile_bio = BIO.openfile(x509_sign_certfile)\
if os.path.isfile(x509_sign_certfile)\
else BIO.MemoryBuffer(x509_sign_certfile)
s.load_key_bio(keyfile_bio, sign_certfile_bio,
callback=lambda x: sign_passphrase)
if x509_sign_chainfile:
sk = X509.X509_Stack()
chain = X509.load_cert(x509_sign_chainfile)\
if os.path.isfile(x509_sign_chainfile)\
else X509.load_cert_string(x509_sign_chainfile)
sk.push(chain)
s.set_x509_stack(sk)
except Exception, e:
self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e)
return False
try:
if x509_nocerts:
flags = SMIME.PKCS7_NOCERTS
else:
flags = 0
if not encrypt:
flags += SMIME.PKCS7_DETACHED
p7 = s.sign(msg_bio, flags=flags)
msg_bio = BIO.MemoryBuffer(payload_in.as_string(
)) # Recreate coz sign() has consumed it.
except Exception, e:
self.error = "Something went wrong on signing: <%s> %s" % (
str(e), str(flags))
return False
# ENCRYPT
if encrypt:
try:
sk = X509.X509_Stack()
if not isinstance(x509_crypt_certfiles, (list, tuple)):
x509_crypt_certfiles = [x509_crypt_certfiles]
# make an encryption cert's stack
for crypt_certfile in x509_crypt_certfiles:
certfile = X509.load_cert(crypt_certfile)\
if os.path.isfile(crypt_certfile)\
else X509.load_cert_string(crypt_certfile)
sk.push(certfile)
s.set_x509_stack(sk)
s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
tmp_bio = BIO.MemoryBuffer()
if sign:
s.write(tmp_bio, p7)
else:
tmp_bio.write(payload_in.as_string())
p7 = s.encrypt(tmp_bio)
except Exception, e:
self.error = "Something went wrong on encrypting: <%s>" % str(e)
return False
# Final stage in sign and encryption
out = BIO.MemoryBuffer()
if encrypt:
s.write(out, p7)
else:
if sign:
s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
else:
out.write('\r\n')
out.write(payload_in.as_string())
out.close()
st = str(out.read())
payload = message_from_string(st)
else:
# no cryptography process as usual
payload = payload_in
if from_address:
payload['From'] = encoded_or_raw(from_address.decode(encoding))
else:
payload['From'] = encoded_or_raw(sender.decode(encoding))
origTo = to[:]
if to:
payload['To'] = encoded_or_raw(', '.join(to).decode(encoding))
if reply_to:
payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding))
if cc:
payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding))
to.extend(cc)
if bcc:
to.extend(bcc)
payload['Subject'] = encoded_or_raw(subject.decode(encoding))
payload['Date'] = email.utils.formatdate()
for k, v in headers.iteritems():
payload[k] = encoded_or_raw(v.decode(encoding))
result = {}
try:
if self.settings.server == 'logging':
logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' %
('-' * 40, sender,
', '.join(to), subject,
text or html, '-' * 40))
elif self.settings.server == 'gae':
xcc = dict()
if cc:
xcc['cc'] = cc
if bcc:
xcc['bcc'] = bcc
if reply_to:
xcc['reply_to'] = reply_to
from google.appengine.api import mail
attachments = attachments and [mail.Attachment(
a.my_filename,
a.my_payload,
contebt_id='<attachment-%s>' % k
) for k,a in enumerate(attachments) if not raw]
if attachments:
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, html=html,
attachments=attachments, **xcc)
elif html and (not raw):
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, html=html, **xcc)
else:
result = mail.send_mail(
sender=sender, to=origTo,
subject=subject, body=text, **xcc)
else:
smtp_args = self.settings.server.split(':')
kwargs = dict(timeout=self.settings.timeout)
if self.settings.ssl:
server = smtplib.SMTP_SSL(*smtp_args, **kwargs)
else:
server = smtplib.SMTP(*smtp_args, **kwargs)
if self.settings.tls and not self.settings.ssl:
server.ehlo(self.settings.hostname)
server.starttls()
server.ehlo(self.settings.hostname)
if self.settings.login:
server.login(*self.settings.login.split(':', 1))
result = server.sendmail(
sender, to, payload.as_string())
server.quit()
except Exception, e:
logger.warn('Mail.send failure:%s' % e)
self.result = result
self.error = e
return False
self.result = result
self.error = None
return True
class Recaptcha(DIV):
    """
    Server-side wrapper for the (legacy) Google reCAPTCHA v1 widget.
    Renders the challenge widget (`xml`) and verifies the user's answer
    against Google's verify endpoint (`_validate`).
    Examples:
        Use as::
            form = FORM(Recaptcha(public_key='...',private_key='...'))
    or::
            form = SQLFORM(...)
            form.append(Recaptcha(public_key='...',private_key='...'))
    """
    # endpoints of the legacy reCAPTCHA v1 HTTP API
    API_SSL_SERVER = 'https://www.google.com/recaptcha/api'
    API_SERVER = 'http://www.google.com/recaptcha/api'
    VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify'
    def __init__(self,
                 request=None,
                 public_key='',
                 private_key='',
                 use_ssl=False,
                 error=None,
                 error_message='invalid',
                 label='Verify:',
                 options='',
                 comment='',
                 ajax=False
                 ):
        """
        Args:
            request: request to validate against (defaults to current.request)
            public_key: reCAPTCHA site key (rendered into the widget)
            private_key: reCAPTCHA secret key (used by `_validate`)
            use_ssl: serve the widget from the https endpoint
            error: error code from a previous failed verification, echoed
                back to the widget so Google can display a message
            error_message: stored in self.errors['captcha'] on failure
            label: field label used by form layout code (e.g. addrow)
            options: body of the javascript RecaptchaOptions object
            comment: field comment used by form layout code
            ajax: use Google's ajax interface (needed for LOADed components)
        """
        request = request or current.request
        self.request_vars = request and request.vars or current.request.vars
        self.remote_addr = request.env.remote_addr
        self.public_key = public_key
        self.private_key = private_key
        self.use_ssl = use_ssl
        self.error = error
        self.errors = Storage()
        self.error_message = error_message
        # DIV bookkeeping: this component renders itself via xml()
        self.components = []
        self.attributes = {}
        self.label = label
        self.options = options
        self.comment = comment
        self.ajax = ajax
    def _validate(self):
        """Verify the submitted challenge/response with Google's verify
        server. Returns True on success; otherwise stores error_message
        under self.errors['captcha'] and returns False."""
        # for local testing:
        recaptcha_challenge_field = \
            self.request_vars.recaptcha_challenge_field
        recaptcha_response_field = \
            self.request_vars.recaptcha_response_field
        private_key = self.private_key
        remoteip = self.remote_addr
        if not (recaptcha_response_field and recaptcha_challenge_field
                and len(recaptcha_response_field)
                and len(recaptcha_challenge_field)):
            self.errors['captcha'] = self.error_message
            return False
        params = urllib.urlencode({
            'privatekey': private_key,
            'remoteip': remoteip,
            'challenge': recaptcha_challenge_field,
            'response': recaptcha_response_field,
        })
        request = urllib2.Request(
            url=self.VERIFY_SERVER,
            data=params,
            headers={'Content-type': 'application/x-www-form-urlencoded',
                     'User-agent': 'reCAPTCHA Python'})
        httpresp = urllib2.urlopen(request)
        return_values = httpresp.read().splitlines()
        httpresp.close()
        # first line is 'true'/'false'; the second carries the error code
        return_code = return_values[0]
        if return_code == 'true':
            del self.request_vars.recaptcha_challenge_field
            del self.request_vars.recaptcha_response_field
            self.request_vars.captcha = ''
            return True
        else:
            # In case we get an error code, store it so we can get an error message
            # from the /api/challenge URL as described in the reCAPTCHA api docs.
            self.error = return_values[1]
            self.errors['captcha'] = self.error_message
            return False
    def xml(self):
        """Render the widget (script + noscript fallback), appending the
        stored error message (if any), and return the XML string."""
        public_key = self.public_key
        use_ssl = self.use_ssl
        error_param = ''
        if self.error:
            error_param = '&error=%s' % self.error
        if use_ssl:
            server = self.API_SSL_SERVER
        else:
            server = self.API_SERVER
        if not self.ajax:
            captcha = DIV(
                SCRIPT("var RecaptchaOptions = {%s};" % self.options),
                SCRIPT(_type="text/javascript",
                       _src="%s/challenge?k=%s%s" % (server, public_key, error_param)),
                TAG.noscript(
                    IFRAME(
                        _src="%s/noscript?k=%s%s" % (
                            server, public_key, error_param),
                        _height="300", _width="500", _frameborder="0"), BR(),
                    INPUT(
                        _type='hidden', _name='recaptcha_response_field',
                        _value='manual_challenge')), _id='recaptcha')
        else: #use Google's ajax interface, needed for LOADed components
            url_recaptcha_js = "%s/js/recaptcha_ajax.js" % server
            RecaptchaOptions = "var RecaptchaOptions = {%s}" % self.options
            script = """%(options)s;
            jQuery.getScript('%(url)s',function() {
            Recaptcha.create('%(public_key)s',
            'recaptcha',jQuery.extend(RecaptchaOptions,{'callback':Recaptcha.focus_response_field}))
            }) """ % ({'options': RecaptchaOptions, 'url': url_recaptcha_js, 'public_key': public_key})
            captcha = DIV(
                SCRIPT(
                    script,
                    _type="text/javascript",
                ),
                TAG.noscript(
                    IFRAME(
                        _src="%s/noscript?k=%s%s" % (
                            server, public_key, error_param),
                        _height="300", _width="500", _frameborder="0"), BR(),
                    INPUT(
                        _type='hidden', _name='recaptcha_response_field',
                        _value='manual_challenge')), _id='recaptcha')
        if not self.errors.captcha:
            return XML(captcha).xml()
        else:
            captcha.append(DIV(self.errors['captcha'], _class='error'))
            return XML(captcha).xml()
# this should only be used for captcha and perhaps not even for that
def addrow(form, a, b, c, style, _id, position=-1):
    """Insert a (label, widget, comment) row into form[0] at *position*.
    The markup depends on *style*: 'divs', 'table2cols', 'ul',
    'bootstrap', or (default) a table row.
    """
    if style == "table2cols":
        # two stacked rows: label + comment, then the widget spanning both columns
        form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
                                    TD(c, _class='w2p_fc')))
        form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'),
                                        _colspan=2, _id=_id))
        return
    if style == "divs":
        row = DIV(DIV(LABEL(a), _class='w2p_fl'),
                  DIV(b, _class='w2p_fw'),
                  DIV(c, _class='w2p_fc'),
                  _id=_id)
    elif style == "ul":
        row = LI(DIV(LABEL(a), _class='w2p_fl'),
                 DIV(b, _class='w2p_fw'),
                 DIV(c, _class='w2p_fc'),
                 _id=_id)
    elif style == "bootstrap":
        row = DIV(LABEL(a, _class='control-label'),
                  DIV(b, SPAN(c, _class='inline-help'),
                      _class='controls'),
                  _class='control-group', _id=_id)
    else:
        row = TR(TD(LABEL(a), _class='w2p_fl'),
                 TD(b, _class='w2p_fw'),
                 TD(c, _class='w2p_fc'), _id=_id)
    form[0].insert(position, row)
class Auth(object):
default_settings = dict(
hideerror=False,
password_min_length=4,
cas_maps=None,
reset_password_requires_verification=False,
registration_requires_verification=False,
registration_requires_approval=False,
login_after_registration=False,
login_after_password_change=True,
alternate_requires_registration=False,
create_user_groups="user_%(id)s",
everybody_group_id=None,
manager_actions={},
auth_manager_role=None,
two_factor_authentication_group = None,
login_captcha=None,
register_captcha=None,
pre_registration_div=None,
retrieve_username_captcha=None,
retrieve_password_captcha=None,
captcha=None,
prevent_open_redirect_attacks=True,
prevent_password_reset_attacks=True,
expiration=3600, # one hour
long_expiration=3600 * 30 * 24, # one month
remember_me_form=True,
allow_basic_login=False,
allow_basic_login_only=False,
on_failed_authentication=lambda x: redirect(x),
formstyle=None,
label_separator=None,
logging_enabled = True,
allow_delete_accounts=False,
password_field='password',
table_user_name='auth_user',
table_group_name='auth_group',
table_membership_name='auth_membership',
table_permission_name='auth_permission',
table_event_name='auth_event',
table_cas_name='auth_cas',
table_user=None,
table_group=None,
table_membership=None,
table_permission=None,
table_event=None,
table_cas=None,
showid=False,
use_username=False,
login_email_validate=True,
login_userfield=None,
multi_login=False,
logout_onlogout=None,
register_fields=None,
register_verify_password=True,
profile_fields=None,
email_case_sensitive=True,
username_case_sensitive=True,
update_fields=['email'],
ondelete="CASCADE",
client_side=True,
renew_session_onlogin=True,
renew_session_onlogout=True,
keep_session_onlogin=True,
keep_session_onlogout=False,
wiki=Settings(),
)
# ## these are messages that can be customized
default_messages = dict(
login_button='Log In',
register_button='Sign Up',
password_reset_button='Request reset password',
password_change_button='Change password',
profile_save_button='Apply changes',
submit_button='Submit',
verify_password='Verify Password',
delete_label='Check to delete',
function_disabled='Function disabled',
access_denied='Insufficient privileges',
registration_verifying='Registration needs verification',
registration_pending='Registration is pending approval',
email_taken='This email already has an account',
invalid_username='Invalid username',
username_taken='Username already taken',
login_disabled='Login disabled by administrator',
logged_in='Logged in',
email_sent='Email sent',
unable_to_send_email='Unable to send email',
email_verified='Email verified',
logged_out='Logged out',
registration_successful='Registration successful',
invalid_email='Invalid email',
unable_send_email='Unable to send email',
invalid_login='Invalid login',
invalid_user='Invalid user',
invalid_password='Invalid password',
is_empty="Cannot be empty",
mismatched_password="Password fields don't match",
verify_email='Welcome %(username)s! Click on the link %(link)s to verify your email',
verify_email_subject='Email verification',
username_sent='Your username was emailed to you',
new_password_sent='A new password was emailed to you',
password_changed='Password changed',
retrieve_username='Your username is: %(username)s',
retrieve_username_subject='Username retrieve',
retrieve_password='Your password is: %(password)s',
retrieve_password_subject='Password retrieve',
reset_password='Click on the link %(link)s to reset your password',
reset_password_subject='Password reset',
invalid_reset_password='Invalid reset password',
profile_updated='Profile updated',
new_password='New password',
old_password='Old password',
group_description='Group uniquely assigned to user %(id)s',
register_log='User %(id)s Registered',
login_log='User %(id)s Logged-in',
login_failed_log=None,
logout_log='User %(id)s Logged-out',
profile_log='User %(id)s Profile updated',
verify_email_log='User %(id)s Verification email sent',
retrieve_username_log='User %(id)s Username retrieved',
retrieve_password_log='User %(id)s Password retrieved',
reset_password_log='User %(id)s Password reset',
change_password_log='User %(id)s Password changed',
add_group_log='Group %(group_id)s created',
del_group_log='Group %(group_id)s deleted',
add_membership_log=None,
del_membership_log=None,
has_membership_log=None,
add_permission_log=None,
del_permission_log=None,
has_permission_log=None,
impersonate_log='User %(id)s is impersonating %(other_id)s',
label_first_name='First name',
label_last_name='Last name',
label_username='Username',
label_email='E-mail',
label_password='Password',
label_registration_key='Registration key',
label_reset_password_key='Reset Password key',
label_registration_id='Registration identifier',
label_role='Role',
label_description='Description',
label_user_id='User ID',
label_group_id='Group ID',
label_name='Name',
label_table_name='Object or table name',
label_record_id='Record ID',
label_time_stamp='Timestamp',
label_client_ip='Client IP',
label_origin='Origin',
label_remember_me="Remember me (for 30 days)",
verify_password_comment='please input your password again',
)
"""
Class for authentication, authorization, role based access control.
Includes:
- registration and profile
- login and logout
- username and password retrieval
- event logging
- role creation and assignment
- user defined group/role based permission
Args:
environment: is there for legacy but unused (awful)
db: has to be the database where to create tables for authentication
mailer: `Mail(...)` or None (no mailer) or True (make a mailer)
hmac_key: can be a hmac_key or hmac_key=Auth.get_or_create_key()
controller: (where is the user action?)
cas_provider: (delegate authentication to the URL, CAS2)
Authentication Example::
from gluon.contrib.utils import *
mail=Mail()
mail.settings.server='smtp.gmail.com:587'
mail.settings.sender='you@somewhere.com'
mail.settings.login='username:password'
auth=Auth(db)
auth.settings.mailer=mail
# auth.settings....=...
auth.define_tables()
def authentication():
return dict(form=auth())
Exposes:
- `http://.../{application}/{controller}/authentication/login`
- `http://.../{application}/{controller}/authentication/logout`
- `http://.../{application}/{controller}/authentication/register`
- `http://.../{application}/{controller}/authentication/verify_email`
- `http://.../{application}/{controller}/authentication/retrieve_username`
- `http://.../{application}/{controller}/authentication/retrieve_password`
- `http://.../{application}/{controller}/authentication/reset_password`
- `http://.../{application}/{controller}/authentication/profile`
- `http://.../{application}/{controller}/authentication/change_password`
On registration a group with role=new_user.id is created
and user is given membership of this group.
You can create a group with::
group_id=auth.add_group('Manager', 'can access the manage action')
auth.add_permission(group_id, 'access to manage')
Here "access to manage" is just a user defined string.
You can give access to a user::
auth.add_membership(group_id, user_id)
If user id is omitted, the logged in user is assumed
Then you can decorate any action::
@auth.requires_permission('access to manage')
def manage():
return dict()
You can restrict a permission to a specific table::
auth.add_permission(group_id, 'edit', db.sometable)
@auth.requires_permission('edit', db.sometable)
Or to a specific record::
auth.add_permission(group_id, 'edit', db.sometable, 45)
@auth.requires_permission('edit', db.sometable, 45)
If authorization is not granted calls::
auth.settings.on_failed_authorization
Other options::
auth.settings.mailer=None
auth.settings.expiration=3600 # seconds
...
### these are messages that can be customized
...
"""
@staticmethod
def get_or_create_key(filename=None, alg='sha512'):
request = current.request
if not filename:
filename = os.path.join(request.folder, 'private', 'auth.key')
if os.path.exists(filename):
key = open(filename, 'r').read().strip()
else:
key = alg + ':' + web2py_uuid()
open(filename, 'w').write(key)
return key
def url(self, f=None, args=None, vars=None, scheme=False):
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller,
f=f, args=args, vars=vars, scheme=scheme)
def here(self):
return URL(args=current.request.args, vars=current.request.get_vars)
    def __init__(self, environment=None, db=None, mailer=True,
                 hmac_key=None, controller='default', function='user',
                 cas_provider=None, signature=True, secure=False,
                 csrf_prevention=True, propagate_extension=None,
                 url_index=None):
        """
        Restore the logged-in user (if any) from the session, then build
        self.settings (from Auth.default_settings) and self.messages
        (from Auth.default_messages) for this request.

        Args:
            environment: legacy; if it is a DAL instance it is used as db
            db: database where the auth tables live
            mailer: a Mail instance, True (create a default Mail), or None
            hmac_key: key used by CRYPT for password hashing
            controller/function: where the auth action is exposed
            cas_provider: URL of a CAS2 provider to delegate login to
            signature: if True, define self.signature (auth fields table)
            secure: if True, force https for all auth requests
            csrf_prevention: enable CSRF protection on auth forms
            propagate_extension: extension used when building login_url
            url_index: URL users are redirected to after login/logout etc.
        """
        ## next two lines for backward compatibility
        if not db and environment and isinstance(environment, DAL):
            db = environment
        self.db = db
        self.environment = current
        self.csrf_prevention = csrf_prevention
        request = current.request
        session = current.session
        auth = session.auth
        self.user_groups = auth and auth.user_groups or {}
        if secure:
            request.requires_https()
        now = request.now
        # if we have auth info
        #   if not expired, use it
        #   if expired, clear the session
        # else, only clear auth info in the session
        if auth:
            delta = datetime.timedelta(days=0, seconds=auth.expiration)
            if auth.last_visit and auth.last_visit + delta > now:
                self.user = auth.user
                # this is a trick to speed up sessions to avoid many writes
                if (now - auth.last_visit).seconds > (auth.expiration / 10):
                    auth.last_visit = request.now
            else:
                # session expired: drop auth info and renew the session id
                self.user = None
                if session.auth:
                    del session.auth
                session.renew(clear_session=True)
        else:
            self.user = None
            if session.auth:
                del session.auth
        # ## what happens after login?
        url_index = url_index or URL(controller, 'index')
        url_login = URL(controller, function, args='login',
                        extension = propagate_extension)
        # ## what happens after registration?
        settings = self.settings = Settings()
        settings.update(Auth.default_settings)
        settings.update(
            cas_domains=[request.env.http_host],
            cas_provider=cas_provider,
            cas_actions=dict(login='login',
                             validate='validate',
                             servicevalidate='serviceValidate',
                             proxyvalidate='proxyValidate',
                             logout='logout'),
            extra_fields={},
            actions_disabled=[],
            controller=controller,
            function=function,
            login_url=url_login,
            logged_url=URL(controller, function, args='profile'),
            download_url=URL(controller, 'download'),
            mailer=(mailer is True) and Mail() or mailer,
            on_failed_authorization =
            URL(controller, function, args='not_authorized'),
            login_next = url_index,
            login_onvalidation = [],
            login_onaccept = [],
            login_onfail = [],
            login_methods = [self],
            login_form = self,
            logout_next = url_index,
            logout_onlogout = None,
            register_next = url_index,
            register_onvalidation = [],
            register_onaccept = [],
            verify_email_next = url_login,
            verify_email_onaccept = [],
            profile_next = url_index,
            profile_onvalidation = [],
            profile_onaccept = [],
            retrieve_username_next = url_index,
            retrieve_password_next = url_index,
            request_reset_password_next = url_login,
            reset_password_next = url_index,
            change_password_next = url_index,
            change_password_onvalidation = [],
            change_password_onaccept = [],
            retrieve_password_onvalidation = [],
            reset_password_onvalidation = [],
            reset_password_onaccept = [],
            hmac_key = hmac_key,
            formstyle = current.response.formstyle,
            label_separator = current.response.form_label_separator
        )
        settings.lock_keys = True
        # ## these are messages that can be customized
        messages = self.messages = Messages(current.T)
        messages.update(Auth.default_messages)
        messages.update(ajax_failed_authentication=
                        DIV(H4('NOT AUTHORIZED'),
                            'Please ',
                            A('login',
                              _href=self.settings.login_url +
                              ('?_next=' + urllib.quote(current.request.env.http_web2py_component_location))
                              if current.request.env.http_web2py_component_location else ''),
                            ' to view this content.',
                            _class='not-authorized alert alert-block'))
        messages.lock_keys = True
        # for "remember me" option
        response = current.response
        if auth and auth.remember_me:
            # when user wants to be logged in for longer
            response.session_cookie_expires = auth.expiration
        if signature:
            self.define_signature()
        else:
            self.signature = None
def get_vars_next(self):
next = current.request.vars._next
if isinstance(next, (list, tuple)):
next = next[0]
return next
def _get_user_id(self):
"""accessor for auth.user_id"""
return self.user and self.user.id or None
user_id = property(_get_user_id, doc="user.id or None")
    def table_user(self):
        """Return the (possibly lazy) auth user table."""
        return self.db[self.settings.table_user_name]
    def table_group(self):
        """Return the (possibly lazy) auth group table."""
        return self.db[self.settings.table_group_name]
    def table_membership(self):
        """Return the (possibly lazy) auth membership table."""
        return self.db[self.settings.table_membership_name]
    def table_permission(self):
        """Return the (possibly lazy) auth permission table."""
        return self.db[self.settings.table_permission_name]
    def table_event(self):
        """Return the (possibly lazy) auth event table."""
        return self.db[self.settings.table_event_name]
    def table_cas(self):
        """Return the (possibly lazy) auth CAS ticket table."""
        return self.db[self.settings.table_cas_name]
    def _HTTP(self, *a, **b):
        """
        Raise an HTTP exception with the given arguments;
        only used in lambda: self._HTTP(404)
        """
        raise HTTP(*a, **b)
    def __call__(self):
        """
        Dispatch request.args(0) to the matching auth action (login,
        logout, register, ...) or to the CAS endpoints; raises HTTP(404)
        for disabled or unknown actions.
        Example:
            Use as::
                def authentication():
                    return dict(form=auth())
        """
        request = current.request
        args = request.args
        if not args:
            # no action given: default to the login action
            redirect(self.url(args='login', vars=request.vars))
        elif args[0] in self.settings.actions_disabled:
            raise HTTP(404)
        if args[0] in ('login', 'logout', 'register', 'verify_email',
                       'retrieve_username', 'retrieve_password',
                       'reset_password', 'request_reset_password',
                       'change_password', 'profile', 'groups',
                       'impersonate', 'not_authorized'):
            # impersonate takes the target user id as a second arg
            if len(request.args) >= 2 and args[0] == 'impersonate':
                return getattr(self, args[0])(request.args[1])
            else:
                return getattr(self, args[0])()
        elif args[0] == 'cas' and not self.settings.cas_provider:
            # acting as a CAS provider: map the sub-action to a handler
            if args(1) == self.settings.cas_actions['login']:
                return self.cas_login(version=2)
            elif args(1) == self.settings.cas_actions['validate']:
                return self.cas_validate(version=1)
            elif args(1) == self.settings.cas_actions['servicevalidate']:
                return self.cas_validate(version=2, proxy=False)
            elif args(1) == self.settings.cas_actions['proxyvalidate']:
                return self.cas_validate(version=2, proxy=True)
            elif args(1) == self.settings.cas_actions['logout']:
                return self.logout(next=request.vars.service or DEFAULT)
        else:
            raise HTTP(404)
    def navbar(self, prefix='Welcome', action=None,
               separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT,
               referrer_actions=DEFAULT, mode='default'):
        """ Navbar with support for more templates
        This uses some code from the old navbar.
        Args:
            mode: 'asmenu', 'dropdown' (bootstrap3), 'bare', or any
                other value for the legacy SPAN-based navbar
        """
        items = []  # Hold all menu items in a list
        self.bar = ''  # The final
        T = current.T
        referrer_actions = [] if not referrer_actions else referrer_actions
        if not action:
            action = self.url(self.settings.function)
        request = current.request
        if URL() == action:
            next = ''
        else:
            # propagate the current location so the action can return here
            next = '?_next=' + urllib.quote(URL(args=request.args,
                                                vars=request.get_vars))
        href = lambda function: '%s/%s%s' % (action, function, next
                                             if referrer_actions is DEFAULT
                                             or function in referrer_actions
                                             else '')
        if isinstance(prefix, str):
            prefix = T(prefix)
        if prefix:
            prefix = prefix.strip() + ' '
        def Anr(*a, **b):
            # anchor with rel="nofollow" (auth links should not be crawled)
            b['_rel'] = 'nofollow'
            return A(*a, **b)
        if self.user_id:  # User is logged in
            logout_next = self.settings.logout_next
            items.append({'name': T('Log Out'),
                          'href': '%s/logout?_next=%s' % (action,
                                                          urllib.quote(
                                                              logout_next)),
                          'icon': 'icon-off'})
            if not 'profile' in self.settings.actions_disabled:
                items.append({'name': T('Profile'), 'href': href('profile'),
                              'icon': 'icon-user'})
            if not 'change_password' in self.settings.actions_disabled:
                items.append({'name': T('Password'),
                              'href': href('change_password'),
                              'icon': 'icon-lock'})
            if user_identifier is DEFAULT:
                user_identifier = '%(first_name)s'
            if callable(user_identifier):
                user_identifier = user_identifier(self.user)
            elif ((isinstance(user_identifier, str) or
                   type(user_identifier).__name__ == 'lazyT') and
                  re.search(r'%\(.+\)s', user_identifier)):
                # treat the identifier as a format string over the user row
                user_identifier = user_identifier % self.user
            if not user_identifier:
                user_identifier = ''
        else:  # User is not logged in
            items.append({'name': T('Log In'), 'href': href('login'),
                          'icon': 'icon-off'})
            if not 'register' in self.settings.actions_disabled:
                items.append({'name': T('Sign Up'), 'href': href('register'),
                              'icon': 'icon-user'})
            if not 'request_reset_password' in self.settings.actions_disabled:
                items.append({'name': T('Lost password?'),
                              'href': href('request_reset_password'),
                              'icon': 'icon-lock'})
            if (self.settings.use_username and not
                    'retrieve_username' in self.settings.actions_disabled):
                items.append({'name': T('Forgot username?'),
                              'href': href('retrieve_username'),
                              'icon': 'icon-edit'})
        def menu():  # For inclusion in MENU
            self.bar = [(items[0]['name'], False, items[0]['href'], [])]
            del items[0]
            for item in items:
                self.bar[0][3].append((item['name'], False, item['href']))
        def bootstrap3():  # Default web2py scaffolding
            def rename(icon): return icon+' '+icon.replace('icon', 'glyphicon')
            self.bar = UL(LI(Anr(I(_class=rename('icon '+items[0]['icon'])),
                                 ' ' + items[0]['name'],
                                 _href=items[0]['href'])), _class='dropdown-menu')
            del items[0]
            for item in items:
                self.bar.insert(-1, LI(Anr(I(_class=rename('icon '+item['icon'])),
                                           ' ' + item['name'],
                                           _href=item['href'])))
            self.bar.insert(-1, LI('', _class='divider'))
            if self.user_id:
                self.bar = LI(Anr(prefix, user_identifier,
                                  _href='#', _class="dropdown-toggle",
                                  data={'toggle': 'dropdown'}),
                              self.bar, _class='dropdown')
            else:
                self.bar = LI(Anr(T('Log In'),
                                  _href='#', _class="dropdown-toggle",
                                  data={'toggle': 'dropdown'}), self.bar,
                              _class='dropdown')
        def bare():
            """ In order to do advanced customization we only need the
            prefix, the user_identifier and the href attribute of items
            Examples:
                Use as::
                # in module custom_layout.py
                from gluon import *
                def navbar(auth_navbar):
                    bar = auth_navbar
                    user = bar["user"]
                    if not user:
                        btn_login = A(current.T("Login"),
                                      _href=bar["login"],
                                      _class="btn btn-success",
                                      _rel="nofollow")
                        btn_register = A(current.T("Sign up"),
                                         _href=bar["register"],
                                         _class="btn btn-primary",
                                         _rel="nofollow")
                        return DIV(btn_register, btn_login, _class="btn-group")
                    else:
                        toggletext = "%s back %s" % (bar["prefix"], user)
                        toggle = A(toggletext,
                                   _href="#",
                                   _class="dropdown-toggle",
                                   _rel="nofollow",
                                   **{"_data-toggle": "dropdown"})
                        li_profile = LI(A(I(_class="icon-user"), ' ',
                                          current.T("Account details"),
                                          _href=bar["profile"], _rel="nofollow"))
                        li_custom = LI(A(I(_class="icon-book"), ' ',
                                         current.T("My Agenda"),
                                         _href="#", rel="nofollow"))
                        li_logout = LI(A(I(_class="icon-off"), ' ',
                                         current.T("logout"),
                                         _href=bar["logout"], _rel="nofollow"))
                        dropdown = UL(li_profile,
                                      li_custom,
                                      LI('', _class="divider"),
                                      li_logout,
                                      _class="dropdown-menu", _role="menu")
                        return LI(toggle, dropdown, _class="dropdown")
                # in models db.py
                import custom_layout as custom
                # in layout.html
                <ul id="navbar" class="nav pull-right">
                    {{='auth' in globals() and \
                        custom.navbar(auth.navbar(mode='bare')) or ''}}</ul>
            """
            bare = {}
            bare['prefix'] = prefix
            bare['user'] = user_identifier if self.user_id else None
            for i in items:
                if i['name'] == T('Log In'):
                    k = 'login'
                elif i['name'] == T('Sign Up'):
                    k = 'register'
                elif i['name'] == T('Lost password?'):
                    k = 'request_reset_password'
                elif i['name'] == T('Forgot username?'):
                    k = 'retrieve_username'
                elif i['name'] == T('Log Out'):
                    k = 'logout'
                elif i['name'] == T('Profile'):
                    k = 'profile'
                elif i['name'] == T('Password'):
                    k = 'change_password'
                bare[k] = i['href']
            self.bar = bare
        options = {'asmenu': menu,
                   'dropdown': bootstrap3,
                   'bare': bare
                   }  # Define custom modes.
        if mode in options and callable(options[mode]):
            options[mode]()
        else:
            # legacy SPAN-based navbar with configurable separators
            s1, s2, s3 = separators
            if self.user_id:
                self.bar = SPAN(prefix, user_identifier, s1,
                                Anr(items[0]['name'],
                                    _href=items[0]['href']), s3,
                                _class='auth_navbar')
            else:
                self.bar = SPAN(s1, Anr(items[0]['name'],
                                        _href=items[0]['href']), s3,
                                _class='auth_navbar')
            for item in items[1:]:
                self.bar.insert(-1, s2)
                self.bar.insert(-1, Anr(item['name'], _href=item['href']))
        return self.bar
def __get_migrate(self, tablename, migrate=True):
if type(migrate).__name__ == 'str':
return (migrate + tablename + '.table')
elif migrate == False:
return False
else:
return True
    def enable_record_versioning(self,
                                 tables,
                                 archive_db=None,
                                 archive_names='%(tablename)s_archive',
                                 current_record='current_record',
                                 current_record_label=None):
        """
        Used to enable full record versioning (including auth tables)::
            auth = Auth(db)
            auth.define_tables(signature=True)
            # define our own tables
            db.define_table('mything',Field('name'),auth.signature)
            auth.enable_record_versioning(tables=db)
        tables can be the db (all table) or a list of tables.
        only tables with modified_by and modified_on fields (as created
        by auth.signature) will have versioning. Old record versions will be
        in table 'mything_archive' automatically defined.
        when you enable enable_record_versioning, records are never
        deleted but marked with is_active=False.
        enable_record_versioning enables a common_filter for
        every table that filters out records with is_active = False
        Note:
            If you use auth.enable_record_versioning,
            do not use auth.archive or you will end up with duplicates.
            auth.archive does explicitly what enable_record_versioning
            does automatically.
        """
        current_record_label = current_record_label or current.T(
            current_record.replace('_', ' ').title())
        for table in tables:
            fieldnames = table.fields()
            # only version tables carrying auth.signature columns and not
            # already versioned (no current_record field yet)
            if ('id' in fieldnames and
                'modified_on' in fieldnames and
                not current_record in fieldnames):
                table._enable_record_versioning(
                    archive_db=archive_db,
                    archive_name=archive_names,
                    current_record=current_record,
                    current_record_label=current_record_label)
    def define_signature(self):
        """Build self.signature: a dummy Table with the standard audit
        fields (is_active, created_on/by, modified_on/by) meant to be
        appended to application tables and the auth tables."""
        db = self.db
        settings = self.settings
        request = current.request
        T = current.T
        reference_user = 'reference %s' % settings.table_user_name
        def lazy_user(auth=self):
            # evaluated at insert/update time, not at table-definition time
            return auth.user_id
        def represent(id, record=None, s=settings):
            try:
                user = s.table_user(id)
                return '%s %s' % (user.get("first_name", user.get("email")),
                                  user.get("last_name", ''))
            except:
                return id
        ondelete = self.settings.ondelete
        self.signature = Table(
            self.db, 'auth_signature',
            Field('is_active', 'boolean',
                  default=True,
                  readable=False, writable=False,
                  label=T('Is Active')),
            Field('created_on', 'datetime',
                  default=request.now,
                  writable=False, readable=False,
                  label=T('Created On')),
            Field('created_by',
                  reference_user,
                  default=lazy_user, represent=represent,
                  writable=False, readable=False,
                  label=T('Created By'), ondelete=ondelete),
            Field('modified_on', 'datetime',
                  update=request.now, default=request.now,
                  writable=False, readable=False,
                  label=T('Modified On')),
            Field('modified_by',
                  reference_user, represent=represent,
                  default=lazy_user, update=lazy_user,
                  writable=False, readable=False,
                  label=T('Modified By'), ondelete=ondelete))
    def define_tables(self, username=None, signature=None,
                      migrate=None, fake_migrate=None):
        """
        To be called unless tables are defined manually
        Examples:
            Use as::
                # defines all needed tables and table files
                # 'myprefix_auth_user.table', ...
                auth.define_tables(migrate='myprefix_')
                # defines all needed tables without migration/table files
                auth.define_tables(migrate=False)
        """
        db = self.db
        if migrate is None:
            migrate = db._migrate
        if fake_migrate is None:
            fake_migrate = db._fake_migrate
        settings = self.settings
        if username is None:
            username = settings.use_username
        else:
            settings.use_username = username
        if not self.signature:
            self.define_signature()
        # normalize the signature argument into a list of signature tables
        if signature == True:
            signature_list = [self.signature]
        elif not signature:
            signature_list = []
        elif isinstance(signature, Table):
            signature_list = [signature]
        else:
            signature_list = signature
        is_not_empty = IS_NOT_EMPTY(error_message=self.messages.is_empty)
        is_crypted = CRYPT(key=settings.hmac_key,
                           min_length=settings.password_min_length)
        is_unique_email = [
            IS_EMAIL(error_message=self.messages.invalid_email),
            IS_NOT_IN_DB(db, '%s.email' % settings.table_user_name,
                         error_message=self.messages.email_taken)]
        if not settings.email_case_sensitive:
            is_unique_email.insert(1, IS_LOWER())
        if not settings.table_user_name in db.tables:
            passfield = settings.password_field
            # per-table extra fields configured by the app, plus signature
            extra_fields = settings.extra_fields.get(
                settings.table_user_name, []) + signature_list
            if username or settings.cas_provider:
                # variant with a username column (also required for CAS)
                is_unique_username = \
                    [IS_MATCH('[\w\.\-]+', strict=True,
                              error_message=self.messages.invalid_username),
                     IS_NOT_IN_DB(db, '%s.username' % settings.table_user_name,
                                  error_message=self.messages.username_taken)]
                if not settings.username_case_sensitive:
                    is_unique_username.insert(1, IS_LOWER())
                db.define_table(
                    settings.table_user_name,
                    Field('first_name', length=128, default='',
                          label=self.messages.label_first_name,
                          requires=is_not_empty),
                    Field('last_name', length=128, default='',
                          label=self.messages.label_last_name,
                          requires=is_not_empty),
                    Field('email', length=512, default='',
                          label=self.messages.label_email,
                          requires=is_unique_email),
                    Field('username', length=128, default='',
                          label=self.messages.label_username,
                          requires=is_unique_username),
                    Field(passfield, 'password', length=512,
                          readable=False, label=self.messages.label_password,
                          requires=[is_crypted]),
                    Field('registration_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_key),
                    Field('reset_password_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_reset_password_key),
                    Field('registration_id', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_id),
                    *extra_fields,
                    **dict(
                        migrate=self.__get_migrate(settings.table_user_name,
                                                   migrate),
                        fake_migrate=fake_migrate,
                        format='%(username)s'))
            else:
                db.define_table(
                    settings.table_user_name,
                    Field('first_name', length=128, default='',
                          label=self.messages.label_first_name,
                          requires=is_not_empty),
                    Field('last_name', length=128, default='',
                          label=self.messages.label_last_name,
                          requires=is_not_empty),
                    Field('email', length=512, default='',
                          label=self.messages.label_email,
                          requires=is_unique_email),
                    Field(passfield, 'password', length=512,
                          readable=False, label=self.messages.label_password,
                          requires=[is_crypted]),
                    Field('registration_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_key),
                    Field('reset_password_key', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_reset_password_key),
                    Field('registration_id', length=512,
                          writable=False, readable=False, default='',
                          label=self.messages.label_registration_id),
                    *extra_fields,
                    **dict(
                        migrate=self.__get_migrate(settings.table_user_name,
                                                   migrate),
                        fake_migrate=fake_migrate,
                        format='%(first_name)s %(last_name)s (%(id)s)'))
        reference_table_user = 'reference %s' % settings.table_user_name
        if not settings.table_group_name in db.tables:
            extra_fields = settings.extra_fields.get(
                settings.table_group_name, []) + signature_list
            db.define_table(
                settings.table_group_name,
                Field('role', length=512, default='',
                      label=self.messages.label_role,
                      requires=IS_NOT_IN_DB(db, '%s.role' % settings.table_group_name)),
                Field('description', 'text',
                      label=self.messages.label_description),
                *extra_fields,
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_group_name, migrate),
                    fake_migrate=fake_migrate,
                    format='%(role)s (%(id)s)'))
        reference_table_group = 'reference %s' % settings.table_group_name
        if not settings.table_membership_name in db.tables:
            extra_fields = settings.extra_fields.get(
                settings.table_membership_name, []) + signature_list
            db.define_table(
                settings.table_membership_name,
                Field('user_id', reference_table_user,
                      label=self.messages.label_user_id),
                Field('group_id', reference_table_group,
                      label=self.messages.label_group_id),
                *extra_fields,
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_membership_name, migrate),
                    fake_migrate=fake_migrate))
        if not settings.table_permission_name in db.tables:
            extra_fields = settings.extra_fields.get(
                settings.table_permission_name, []) + signature_list
            db.define_table(
                settings.table_permission_name,
                Field('group_id', reference_table_group,
                      label=self.messages.label_group_id),
                Field('name', default='default', length=512,
                      label=self.messages.label_name,
                      requires=is_not_empty),
                Field('table_name', length=512,
                      label=self.messages.label_table_name),
                Field('record_id', 'integer', default=0,
                      label=self.messages.label_record_id,
                      requires=IS_INT_IN_RANGE(0, 10 ** 9)),
                *extra_fields,
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_permission_name, migrate),
                    fake_migrate=fake_migrate))
        if not settings.table_event_name in db.tables:
            db.define_table(
                settings.table_event_name,
                Field('time_stamp', 'datetime',
                      default=current.request.now,
                      label=self.messages.label_time_stamp),
                Field('client_ip',
                      default=current.request.client,
                      label=self.messages.label_client_ip),
                Field('user_id', reference_table_user, default=None,
                      label=self.messages.label_user_id),
                Field('origin', default='auth', length=512,
                      label=self.messages.label_origin,
                      requires=is_not_empty),
                Field('description', 'text', default='',
                      label=self.messages.label_description,
                      requires=is_not_empty),
                *settings.extra_fields.get(settings.table_event_name, []),
                **dict(
                    migrate=self.__get_migrate(
                        settings.table_event_name, migrate),
                    fake_migrate=fake_migrate))
        now = current.request.now
        if settings.cas_domains:
            if not settings.table_cas_name in db.tables:
                db.define_table(
                    settings.table_cas_name,
                    Field('user_id', reference_table_user, default=None,
                          label=self.messages.label_user_id),
                    Field('created_on', 'datetime', default=now),
                    Field('service', requires=IS_URL()),
                    Field('ticket'),
                    Field('renew', 'boolean', default=False),
                    *settings.extra_fields.get(settings.table_cas_name, []),
                    **dict(
                        migrate=self.__get_migrate(
                            settings.table_cas_name, migrate),
                        fake_migrate=fake_migrate))
        if not db._lazy_tables:
            # eagerly resolve the table objects into settings
            settings.table_user = db[settings.table_user_name]
            settings.table_group = db[settings.table_group_name]
            settings.table_membership = db[settings.table_membership_name]
            settings.table_permission = db[settings.table_permission_name]
            settings.table_event = db[settings.table_event_name]
            if settings.cas_domains:
                settings.table_cas = db[settings.table_cas_name]
        if settings.cas_provider:  # THIS IS NOT LAZY
            # delegate authentication to an external CAS2 provider and
            # disable the locally-managed account actions
            settings.actions_disabled = \
                ['profile', 'register', 'change_password',
                 'request_reset_password', 'retrieve_username']
            from gluon.contrib.login_methods.cas_auth import CasAuth
            maps = settings.cas_maps
            if not maps:
                table_user = self.table_user()
                maps = dict((name, lambda v, n=name: v.get(n, None)) for name in
                            table_user.fields if name != 'id'
                            and table_user[name].readable)
                maps['registration_id'] = \
                    lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user'])
            actions = [settings.cas_actions['login'],
                       settings.cas_actions['servicevalidate'],
                       settings.cas_actions['logout']]
            settings.login_form = CasAuth(
                casversion=2,
                urlbase=settings.cas_provider,
                actions=actions,
                maps=maps)
        return self
def log_event(self, description, vars=None, origin='auth'):
"""
Examples:
Use as::
auth.log_event(description='this happened', origin='auth')
"""
if not self.settings.logging_enabled or not description:
return
elif self.is_logged_in():
user_id = self.user.id
else:
user_id = None # user unknown
vars = vars or {}
# log messages should not be translated
if type(description).__name__ == 'lazyT':
description = description.m
self.table_event().insert(
description=str(description % vars),
origin=origin, user_id=user_id)
    def get_or_create_user(self, keys, update_fields=['email'],
                           login=True, get=True):
        """
        Used for alternate login methods:
            If the user exists already then password is updated.
            If the user doesn't yet exist, then they are created.

        Args:
            keys: dict of candidate field values; 'registration_id',
                'username' and 'email' are tried in that order to locate
                an existing user.
            update_fields: names of fields copied from `keys` onto an
                existing record (the mutable default is only iterated,
                never mutated, so it is harmless here).
            login: when True, the found/created user is stored in self.user.
            get: when False, an existing match returns None instead of
                being updated (used by register_bare to avoid overwrites).

        Returns:
            the user record, or None when no usable key was supplied or
            when the user exists and get=False.
        """
        table_user = self.table_user()
        user = None
        checks = []
        # make a guess about who this user is
        for fieldname in ['registration_id', 'username', 'email']:
            if fieldname in table_user.fields() and \
                    keys.get(fieldname, None):
                checks.append(fieldname)
                value = keys[fieldname]
                user = table_user(**{fieldname: value})
                if user:
                    break
        if not checks:
            return None
        # default the registration_id to the first key that was present
        if not 'registration_id' in keys:
            keys['registration_id'] = keys[checks[0]]
        # if we think we found the user but registration_id does not match,
        # make new user
        if 'registration_id' in checks \
                and user \
                and user.registration_id \
                and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])):
            user = None  # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER?
        if user:
            if not get:
                # added for register_bare to avoid overwriting users
                return None
            update_keys = dict(registration_id=keys['registration_id'])
            for key in update_fields:
                if key in keys:
                    update_keys[key] = keys[key]
            user.update_record(**update_keys)
        elif checks:
            # no existing user: create one, guessing first_name when absent
            if not 'first_name' in keys and 'first_name' in table_user.fields:
                guess = keys.get('email', 'anonymous').split('@')[0]
                keys['first_name'] = keys.get('username', guess)
            user_id = table_user.insert(**table_user._filter_fields(keys))
            user = table_user[user_id]
            if self.settings.create_user_groups:
                group_id = self.add_group(
                    self.settings.create_user_groups % user)
                self.add_membership(group_id, user_id)
            if self.settings.everybody_group_id:
                self.add_membership(self.settings.everybody_group_id, user_id)
        if login:
            self.user = user
        return user
def basic(self, basic_auth_realm=False):
"""
Performs basic login.
Args:
basic_auth_realm: optional basic http authentication realm. Can take
str or unicode or function or callable or boolean.
reads current.request.env.http_authorization
and returns basic_allowed,basic_accepted,user.
if basic_auth_realm is defined is a callable it's return value
is used to set the basic authentication realm, if it's a string
its content is used instead. Otherwise basic authentication realm
is set to the application name.
If basic_auth_realm is None or False (the default) the behavior
is to skip sending any challenge.
"""
if not self.settings.allow_basic_login:
return (False, False, False)
basic = current.request.env.http_authorization
if basic_auth_realm:
if callable(basic_auth_realm):
basic_auth_realm = basic_auth_realm()
elif isinstance(basic_auth_realm, (unicode, str)):
basic_realm = unicode(basic_auth_realm)
elif basic_auth_realm is True:
basic_realm = u'' + current.request.application
http_401 = HTTP(401, u'Not Authorized', **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'})
if not basic or not basic[:6].lower() == 'basic ':
if basic_auth_realm:
raise http_401
return (True, False, False)
(username, sep, password) = base64.b64decode(basic[6:]).partition(':')
is_valid_user = sep and self.login_bare(username, password)
if not is_valid_user and basic_auth_realm:
raise http_401
return (True, True, is_valid_user)
    def login_user(self, user):
        """
        Logins the `user = db.auth_user(id)`

        Copies the record into a plain Row, strips the password (and any
        callable attributes) so the hash never lives in the session,
        optionally renews the session, stores the auth context in
        session.auth and refreshes group memberships.
        """
        from gluon.settings import global_settings
        if global_settings.web2py_runtime_gae:
            # GAE rows need explicit field filtering before wrapping
            user = Row(self.table_user()._filter_fields(user, id=True))
            delattr(user, 'password')
        else:
            user = Row(user)
            # drop the password hash and any computed/callable attributes
            for key, value in user.items():
                if callable(value) or key == 'password':
                    delattr(user, key)
        if self.settings.renew_session_onlogin:
            # mitigate session fixation by issuing a fresh session id
            current.session.renew(clear_session=not self.settings.keep_session_onlogin)
        current.session.auth = Storage(user=user,
                                       last_visit=current.request.now,
                                       expiration=self.settings.expiration,
                                       hmac_key=web2py_uuid())
        self.user = user
        self.update_groups()
def _get_login_settings(self):
table_user = self.table_user()
userfield = self.settings.login_userfield or 'username' \
if 'username' in table_user.fields else 'email'
passfield = self.settings.password_field
return Storage({"table_user": table_user,
"userfield": userfield,
"passfield": passfield})
    def login_bare(self, username, password):
        """
        Logins user as specified by username (or email) and password

        No form is involved: the password is run through the field's
        validator (so it is hashed the same way as the stored value), the
        account must have an empty registration_key (verified/approved),
        and alternate login methods are tried when the user is not in the
        local table.

        Returns:
            the user record on local success, the username on alternate-
            method success, or False.
        """
        settings = self._get_login_settings()
        user = settings.table_user(**{settings.userfield: \
                                     username})
        if user and user.get(settings.passfield, False):
            # hash the candidate password with the stored validator chain
            password = settings.table_user[
                settings.passfield].validate(password)[0]
            if ((user.registration_key is None or
                 not user.registration_key.strip()) and
                password == user[settings.passfield]):
                self.login_user(user)
                return user
        else:
            # user not in database try other login methods
            for login_method in self.settings.login_methods:
                if login_method != self and login_method(username, password):
                    self.user = username
                    return username
        return False
    def register_bare(self, **fields):
        """
        Registers a user as specified by username (or email)
        and a raw password.

        The raw password is validated/hashed through the table's password
        validator before insertion.

        Returns:
            the new user record, or False when a user with that userfield
            value already exists (get_or_create_user refuses to overwrite).

        Raises:
            ValueError: if the password or the userfield value is missing
                or invalid.
        """
        settings = self._get_login_settings()
        if not fields.get(settings.passfield):
            raise ValueError("register_bare: " +
                             "password not provided or invalid")
        elif not fields.get(settings.userfield):
            raise ValueError("register_bare: " +
                             "userfield not provided or invalid")
        fields[settings.passfield] = settings.table_user[settings.passfield].validate(fields[settings.passfield])[0]
        user = self.get_or_create_user(fields, login=False, get=False, update_fields=self.settings.update_fields)
        if not user:
            # get or create did not create a user (it ignores duplicate records)
            return False
        return user
def cas_login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
version=2,
):
request = current.request
response = current.response
session = current.session
db, table = self.db, self.table_cas()
session._cas_service = request.vars.service or session._cas_service
if not request.env.http_host in self.settings.cas_domains or \
not session._cas_service:
raise HTTP(403, 'not authorized')
def allow_access(interactivelogin=False):
row = table(service=session._cas_service, user_id=self.user.id)
if row:
ticket = row.ticket
else:
ticket = 'ST-' + web2py_uuid()
table.insert(service=session._cas_service,
user_id=self.user.id,
ticket=ticket,
created_on=request.now,
renew=interactivelogin)
service = session._cas_service
query_sep = '&' if '?' in service else '?'
del session._cas_service
if 'warn' in request.vars and not interactivelogin:
response.headers[
'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket
return A("Continue to %s" % service,
_href=service + query_sep + "ticket=" + ticket)
else:
redirect(service + query_sep + "ticket=" + ticket)
if self.is_logged_in() and not 'renew' in request.vars:
return allow_access()
elif not self.is_logged_in() and 'gateway' in request.vars:
redirect(service)
def cas_onaccept(form, onaccept=onaccept):
if not onaccept is DEFAULT:
onaccept(form)
return allow_access(interactivelogin=True)
return self.login(next, onvalidation, cas_onaccept, log)
    def cas_validate(self, version=2, proxy=False):
        """
        CAS server ticket-validation endpoint.

        Looks up request.vars.ticket in the CAS table and replies (via a
        raised HTTP 200) with either the CAS v1 plain-text answer or the
        CAS v2 XML serviceResponse envelope.
        """
        request = current.request
        db, table = self.db, self.table_cas()
        current.response.headers['Content-Type'] = 'text'
        ticket = request.vars.ticket
        renew = 'renew' in request.vars
        row = table(ticket=ticket)
        success = False
        if row:
            # NOTE(review): due to conditional-expression precedence this
            # parses as (login_userfield or 'username') if 'username' in
            # table.fields else 'email'; `table` here is the CAS table, so
            # a configured login_userfield appears to be ignored -- confirm
            # intent before changing.
            userfield = self.settings.login_userfield or 'username' \
                if 'username' in table.fields else 'email'
            # If ticket is a service Ticket and RENEW flag respected
            if ticket[0:3] == 'ST-' and \
                    not ((row.renew and renew) ^ renew):
                # single-use ticket: resolve the user and delete it
                user = self.table_user()(row.user_id)
                row.delete_record()
                success = True

        def build_response(body):
            # wrap body in the standard CAS XML envelope
            return '<?xml version="1.0" encoding="UTF-8"?>\n' +\
                TAG['cas:serviceResponse'](
                    body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml()
        if success:
            if version == 1:
                message = 'yes\n%s' % user[userfield]
            else:  # assume version 2
                username = user.get('username', user[userfield])
                # expose every readable user field as a cas: attribute
                message = build_response(
                    TAG['cas:authenticationSuccess'](
                        TAG['cas:user'](username),
                        *[TAG['cas:' + field.name](user[field.name])
                          for field in self.table_user()
                          if field.readable]))
        else:
            if version == 1:
                message = 'no\n'
            elif row:
                message = build_response(TAG['cas:authenticationFailure']())
            else:
                message = build_response(
                    TAG['cas:authenticationFailure'](
                        'Ticket %s not recognized' % ticket,
                        _code='INVALID TICKET'))
        raise HTTP(200, message)
def _reset_two_factor_auth(self, session):
"""When two-step authentication is enabled, this function is used to
clear the session after successfully completing second challenge
or when the maximum number of tries allowed has expired.
"""
session.auth_two_factor_user = None
session.auth_two_factor = None
session.auth_two_factor_enabled = False
# Allow up to 4 attempts (the 1st one plus 3 more)
session.auth_two_factor_tries_left = 3
    def login(self,
              next=DEFAULT,
              onvalidation=DEFAULT,
              onaccept=DEFAULT,
              log=DEFAULT,
              ):
        """
        Returns a login form

        Handles local username/email+password login, alternate login
        methods, central (CAS) login forms, and an optional second
        (two-factor) challenge whose code is emailed to the user.

        Args:
            next: URL to redirect to after login (defaults to
                settings.login_next)
            onvalidation: form validation callback(s)
            onaccept: callback(s) run after successful login
            log: message template recorded via log_event
        """
        table_user = self.table_user()
        settings = self.settings
        # choose the validator for the login identifier field
        if 'username' in table_user.fields or \
                not settings.login_email_validate:
            tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
            if not settings.username_case_sensitive:
                tmpvalidator = [IS_LOWER(), tmpvalidator]
        else:
            tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
            if not settings.email_case_sensitive:
                tmpvalidator = [IS_LOWER(), tmpvalidator]
        request = current.request
        response = current.response
        session = current.session
        passfield = settings.password_field
        try:
            # no minimum length when checking an existing password
            table_user[passfield].requires[-1].min_length = 0
        except:
            pass
        ### use session for federated login
        snext = self.get_vars_next()
        if snext and self.settings.prevent_open_redirect_attacks:
            # reject absolute URLs pointing at a different host
            items = snext.split('/')
            if '//' in snext and items[2] != request.env.http_host:
                snext = None
        if snext:
            session._auth_next = snext
        elif session._auth_next:
            snext = session._auth_next
        ### pass
        if next is DEFAULT:
            # important for security
            next = settings.login_next
            if callable(next):
                next = next()
            user_next = snext
            if user_next:
                external = user_next.split('://')
                if external[0].lower() in ['http', 'https', 'ftp']:
                    # external redirects only to whitelisted CAS domains
                    host_next = user_next.split('//', 1)[-1].split('/')[0]
                    if host_next in settings.cas_domains:
                        next = user_next
                else:
                    next = user_next
        if onvalidation is DEFAULT:
            onvalidation = settings.login_onvalidation
        if onaccept is DEFAULT:
            onaccept = settings.login_onaccept
        if log is DEFAULT:
            log = self.messages['login_log']
        onfail = settings.login_onfail
        user = None  # default
        # Setup the default field used for the form
        multi_login = False
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        else:
            if 'username' in table_user.fields:
                username = 'username'
            else:
                username = 'email'
            if self.settings.multi_login:
                multi_login = True
        old_requires = table_user[username].requires
        table_user[username].requires = tmpvalidator
        # If two-factor authentication is enabled, and the maximum
        # number of tries allowed is used up, reset the session to
        # pre-login state with two-factor auth
        if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
            # Exceeded maximum allowed tries for this code. Require user to enter
            # username and password again.
            user = None
            accepted_form = False
            self._reset_two_factor_auth(session)
            # Redirect to the default 'next' page without logging
            # in. If that page requires login, user will be redirected
            # back to the main login form
            redirect(next, client_side=settings.client_side)
        # Before showing the default login form, check whether
        # we are already on the second step of two-step authentication.
        # If we are, then skip this login form and use the form for the
        # second challenge instead.
        # Note to devs: The code inside the if-block is unchanged from the
        # previous version of this file, other than for indentation inside
        # to put it inside the if-block
        if session.auth_two_factor_user is None:
            if settings.remember_me_form:
                extra_fields = [
                    Field('remember_me', 'boolean', default=False,
                          label = self.messages.label_remember_me)]
            else:
                extra_fields = []
            # do we use our own login form, or from a central source?
            if settings.login_form == self:
                form = SQLFORM(
                    table_user,
                    fields=[username, passfield],
                    hidden=dict(_next=next),
                    showid=settings.showid,
                    submit_button=self.messages.login_button,
                    delete_label=self.messages.delete_label,
                    formstyle=settings.formstyle,
                    separator=settings.label_separator,
                    extra_fields = extra_fields,
                )
                captcha = settings.login_captcha or \
                    (settings.login_captcha != False and settings.captcha)
                if captcha:
                    addrow(form, captcha.label, captcha, captcha.comment,
                           settings.formstyle, 'captcha__row')
                accepted_form = False
                if form.accepts(request, session if self.csrf_prevention else None,
                                formname='login', dbio=False,
                                onvalidation=onvalidation,
                                hideerror=settings.hideerror):
                    accepted_form = True
                    # check for username in db
                    entered_username = form.vars[username]
                    if multi_login and '@' in entered_username:
                        # if '@' in username check for email, not username
                        user = table_user(email = entered_username)
                    else:
                        user = table_user(**{username: entered_username})
                    if user:
                        # user in db, check if registration pending or disabled
                        temp_user = user
                        if temp_user.registration_key == 'pending':
                            response.flash = self.messages.registration_pending
                            return form
                        elif temp_user.registration_key in ('disabled', 'blocked'):
                            response.flash = self.messages.login_disabled
                            return form
                        elif (not temp_user.registration_key is None
                              and temp_user.registration_key.strip()):
                            response.flash = \
                                self.messages.registration_verifying
                            return form
                        # try alternate logins 1st as these have the
                        # current version of the password
                        user = None
                        for login_method in settings.login_methods:
                            if login_method != self and \
                                    login_method(request.vars[username],
                                                 request.vars[passfield]):
                                if not self in settings.login_methods:
                                    # do not store password in db
                                    form.vars[passfield] = None
                                user = self.get_or_create_user(
                                    form.vars, settings.update_fields)
                                break
                        if not user:
                            # alternates have failed, maybe because service inaccessible
                            if settings.login_methods[0] == self:
                                # try logging in locally using cached credentials
                                if form.vars.get(passfield, '') == temp_user[passfield]:
                                    # success
                                    user = temp_user
                    else:
                        # user not in db
                        if not settings.alternate_requires_registration:
                            # we're allowed to auto-register users from external systems
                            for login_method in settings.login_methods:
                                if login_method != self and \
                                        login_method(request.vars[username],
                                                     request.vars[passfield]):
                                    if not self in settings.login_methods:
                                        # do not store password in db
                                        form.vars[passfield] = None
                                    user = self.get_or_create_user(
                                        form.vars, settings.update_fields)
                                    break
                    if not user:
                        self.log_event(self.messages['login_failed_log'],
                                       request.post_vars)
                        # invalid login
                        session.flash = self.messages.invalid_login
                        callback(onfail, None)
                        redirect(
                            self.url(args=request.args, vars=request.get_vars),
                            client_side=settings.client_side)
            else:  # use a central authentication server
                cas = settings.login_form
                cas_user = cas.get_user()
                if cas_user:
                    cas_user[passfield] = None
                    user = self.get_or_create_user(
                        table_user._filter_fields(cas_user),
                        settings.update_fields)
                elif hasattr(cas, 'login_form'):
                    return cas.login_form()
                else:
                    # we need to pass through login again before going on
                    next = self.url(settings.function, args='login')
                    redirect(cas.login_url(next),
                             client_side=settings.client_side)
        # Extra login logic for two-factor authentication
        #################################################
        # If the 'user' variable has a value, this means that the first
        # authentication step was successful (i.e. user provided correct
        # username and password at the first challenge).
        # Check if this user is signed up for two-factor authentication
        # Default rule is that the user must be part of a group that is called
        # auth.settings.two_factor_authentication_group
        if user and self.settings.two_factor_authentication_group:
            role = self.settings.two_factor_authentication_group
            session.auth_two_factor_enabled = self.has_membership(user_id=user.id, role=role)
        # challenge
        if session.auth_two_factor_enabled:
            form = SQLFORM.factory(
                Field('authentication_code',
                      required=True,
                      comment='This code was emailed to you and is required for login.'),
                hidden=dict(_next=next),
                formstyle=settings.formstyle,
                separator=settings.label_separator
            )
            # accepted_form is used by some default web2py code later in the
            # function that handles running specified functions before redirect
            # Set it to False until the challenge form is accepted.
            accepted_form = False
            # Handle the case when a user has submitted the login/password
            # form successfully, and the password has been validated, but
            # the two-factor form has not been displayed or validated yet.
            if session.auth_two_factor_user is None and user is not None:
                session.auth_two_factor_user = user  # store the validated user and associate with this session
                session.auth_two_factor = random.randint(100000, 999999)
                session.auth_two_factor_tries_left = 3  # Allow user to try up to 4 times
                # TODO: Add some error checking to handle cases where email cannot be sent
                self.settings.mailer.send(
                    to=user.email,
                    subject="Two-step Login Authentication Code",
                    message="Your temporary login code is {0}".format(session.auth_two_factor))
            if form.accepts(request, session if self.csrf_prevention else None,
                            formname='login', dbio=False,
                            onvalidation=onvalidation,
                            hideerror=settings.hideerror):
                accepted_form = True
                if form.vars['authentication_code'] == str(session.auth_two_factor):
                    # Handle the case when the two-factor form has been successfully validated
                    # and the user was previously stored (the current user should be None because
                    # in this case, the previous username/password login form should not be displayed.
                    # This will allow the code after the 2-factor authentication block to proceed as
                    # normal.
                    if user is None or user == session.auth_two_factor_user:
                        user = session.auth_two_factor_user
                    # For security, because the username stored in the
                    # session somehow does not match the just validated
                    # user. Should not be possible without session stealing
                    # which is hard with SSL.
                    elif user != session.auth_two_factor_user:
                        user = None
                    # Either way, the user and code associated with this session should
                    # be removed. This handles cases where the session login may have
                    # expired but browser window is open, so the old session key and
                    # session usernamem will still exist
                    self._reset_two_factor_auth(session)
                else:
                    # TODO: Limit the number of retries allowed.
                    response.flash = 'Incorrect code. {0} more attempt(s) remaining.'.format(session.auth_two_factor_tries_left)
                    session.auth_two_factor_tries_left -= 1
                    return form
            else:
                return form
        # End login logic for two-factor authentication
        # process authenticated users
        if user:
            user = Row(table_user._filter_fields(user, id=True))
            # process authenticated users
            # user wants to be logged in for longer
            self.login_user(user)
            session.auth.expiration = \
                request.post_vars.remember_me and \
                settings.long_expiration or \
                settings.expiration
            session.auth.remember_me = 'remember_me' in request.post_vars
            self.log_event(log, user)
            session.flash = self.messages.logged_in
        # how to continue
        if settings.login_form == self:
            if accepted_form:
                callback(onaccept, form)
                if next == session._auth_next:
                    session._auth_next = None
                next = replace_id(next, form)
                redirect(next, client_side=settings.client_side)
            table_user[username].requires = old_requires
            return form
        elif user:
            callback(onaccept, None)
            if next == session._auth_next:
                del session._auth_next
            redirect(next, client_side=settings.client_side)
    def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
        """
        Logouts and redirects to login

        Clears two-factor state, runs the onlogout callback, logs the
        event, propagates the logout to a CAS provider when one is in
        use, wipes session.auth and (optionally) renews the session id.
        """
        # Clear out 2-step authentication information if user logs
        # out. This information is also cleared on successful login.
        self._reset_two_factor_auth(current.session)
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.logout_next
        if onlogout is DEFAULT:
            onlogout = self.settings.logout_onlogout
        if onlogout:
            onlogout(self.user)
        if log is DEFAULT:
            log = self.messages['logout_log']
        if self.user:
            self.log_event(log, self.user)
        if self.settings.login_form != self:
            # central (CAS) login: let the provider finish the logout
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                next = cas.logout_url(next)
        current.session.auth = None
        if self.settings.renew_session_onlogout:
            # issue a fresh session id to prevent reuse of the old one
            current.session.renew(clear_session=not self.settings.keep_session_onlogout)
        current.session.flash = self.messages.logged_out
        if not next is None:
            redirect(next)
    def register(self,
                 next=DEFAULT,
                 onvalidation=DEFAULT,
                 onaccept=DEFAULT,
                 log=DEFAULT,
                 ):
        """
        Returns a registration form

        Builds a SQLFORM over the user table, enforces uniqueness of the
        login field, handles optional captcha and password confirmation,
        creates per-user/everybody group memberships, and either sends a
        verification email, marks the account pending for approval, or
        logs the new user straight in, depending on settings.
        """
        table_user = self.table_user()
        request = current.request
        response = current.response
        session = current.session
        if self.is_logged_in():
            # already authenticated users cannot register again
            redirect(self.settings.logged_url,
                     client_side=self.settings.client_side)
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.register_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.register_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.register_onaccept
        if log is DEFAULT:
            log = self.messages['register_log']
        table_user = self.table_user()
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        elif 'username' in table_user.fields:
            username = 'username'
        else:
            username = 'email'
        # Ensure the username field is unique.
        unique_validator = IS_NOT_IN_DB(self.db, table_user[username])
        if not table_user[username].requires:
            table_user[username].requires = unique_validator
        elif isinstance(table_user[username].requires, (list, tuple)):
            # append only when no IS_NOT_IN_DB validator is present yet
            if not any([isinstance(validator, IS_NOT_IN_DB) for validator in
                        table_user[username].requires]):
                if isinstance(table_user[username].requires, list):
                    table_user[username].requires.append(unique_validator)
                else:
                    table_user[username].requires += (unique_validator, )
        elif not isinstance(table_user[username].requires, IS_NOT_IN_DB):
            table_user[username].requires = [table_user[username].requires,
                                             unique_validator]
        passfield = self.settings.password_field
        formstyle = self.settings.formstyle
        if self.settings.register_verify_password:
            # second password field validated against the submitted one
            extra_fields = [
                Field("password_two", "password", requires=IS_EQUAL_TO(
                    request.post_vars.get(passfield, None),
                    error_message=self.messages.mismatched_password),
                    label=current.T("Confirm Password"))]
        else:
            extra_fields = []
        form = SQLFORM(table_user,
                       fields=self.settings.register_fields,
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.register_button,
                       delete_label=self.messages.delete_label,
                       formstyle=formstyle,
                       separator=self.settings.label_separator,
                       extra_fields = extra_fields
                       )
        captcha = self.settings.register_captcha or self.settings.captcha
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment, self.settings.formstyle, 'captcha__row')
        # Add a message if specified
        if self.settings.pre_registration_div:
            addrow(form, '',
                   DIV(_id="pre-reg", *self.settings.pre_registration_div),
                   '', formstyle, '')
        # the registration key doubles as the email verification token
        table_user.registration_key.default = key = web2py_uuid()
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='register',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            description = self.messages.group_description % form.vars
            if self.settings.create_user_groups:
                group_id = self.add_group(
                    self.settings.create_user_groups % form.vars, description)
                self.add_membership(group_id, form.vars.id)
            if self.settings.everybody_group_id:
                self.add_membership(
                    self.settings.everybody_group_id, form.vars.id)
            if self.settings.registration_requires_verification:
                link = self.url(
                    self.settings.function, args=('verify_email', key), scheme=True)
                d = dict(form.vars)
                d.update(dict(key=key, link=link, username=form.vars[username]))
                if not (self.settings.mailer and self.settings.mailer.send(
                        to=form.vars.email,
                        subject=self.messages.verify_email_subject,
                        message=self.messages.verify_email % d)):
                    # email failed: undo the insert so the user can retry
                    self.db.rollback()
                    response.flash = self.messages.unable_send_email
                    return form
                session.flash = self.messages.email_sent
            if self.settings.registration_requires_approval and \
               not self.settings.registration_requires_verification:
                table_user[form.vars.id] = dict(registration_key='pending')
                session.flash = self.messages.registration_pending
            elif (not self.settings.registration_requires_verification or
                      self.settings.login_after_registration):
                if not self.settings.registration_requires_verification:
                    table_user[form.vars.id] = dict(registration_key='')
                session.flash = self.messages.registration_successful
                user = table_user(**{username: form.vars[username]})
                self.login_user(user)
                session.flash = self.messages.logged_in
            self.log_event(log, form.vars)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next, client_side=self.settings.client_side)
        return form
def is_logged_in(self):
"""
Checks if the user is logged in and returns True/False.
If so user is in auth.user as well as in session.auth.user
"""
if self.user:
return True
return False
    def verify_email(self,
                     next=DEFAULT,
                     onaccept=DEFAULT,
                     log=DEFAULT,
                     ):
        """
        Action used to verify the registration email

        The verification key is taken from the last URL argument; on a
        match the registration_key is cleared (or set to 'pending' when
        admin approval is also required) and the user is redirected to
        verify_email_next.
        """
        key = getarg(-1)
        table_user = self.table_user()
        user = table_user(registration_key=key)
        if not user:
            # unknown/stale key: bounce to the login page
            redirect(self.settings.login_url)
        if self.settings.registration_requires_approval:
            user.update_record(registration_key='pending')
            current.session.flash = self.messages.registration_pending
        else:
            user.update_record(registration_key='')
            current.session.flash = self.messages.email_verified
        # make sure session has same user.registration_key as db record
        if current.session.auth and current.session.auth.user:
            current.session.auth.user.registration_key = user.registration_key
        if log is DEFAULT:
            log = self.messages['verify_email_log']
        if next is DEFAULT:
            next = self.settings.verify_email_next
        if onaccept is DEFAULT:
            onaccept = self.settings.verify_email_onaccept
        self.log_event(log, user)
        callback(onaccept, user)
        redirect(next)
    def retrieve_username(self,
                          next=DEFAULT,
                          onvalidation=DEFAULT,
                          onaccept=DEFAULT,
                          log=DEFAULT,
                          ):
        """
        Returns a form to retrieve the user username
        (only if there is a username field)

        Asks for the account email, then mails the matching username(s).
        Raises HTTP 404 when the user table has no 'username' field and
        requires a configured mailer.
        """
        table_user = self.table_user()
        if not 'username' in table_user.fields:
            raise HTTP(404)
        request = current.request
        response = current.response
        session = current.session
        captcha = self.settings.retrieve_username_captcha or \
                (self.settings.retrieve_username_captcha != False and self.settings.captcha)
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.retrieve_username_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_username_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_username_onaccept
        if log is DEFAULT:
            log = self.messages['retrieve_username_log']
        # temporarily require a known email address on the email field
        old_requires = table_user.email.requires
        table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
                                              error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment, self.settings.formstyle, 'captcha__row')
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='retrieve_username', dbio=False,
                        onvalidation=onvalidation, hideerror=self.settings.hideerror):
            users = table_user._db(table_user.email==form.vars.email).select()
            if not users:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            # one email may map to several accounts; send them all
            username = ', '.join(u.username for u in users)
            self.settings.mailer.send(to=form.vars.email,
                                      subject=self.messages.retrieve_username_subject,
                                      message=self.messages.retrieve_username % dict(username=username))
            session.flash = self.messages.email_sent
            for user in users:
                self.log_event(log, user)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        # restore the original validators on the email field
        table_user.email.requires = old_requires
        return form
def random_password(self):
import string
import random
password = ''
specials = r'!#$*'
for i in range(0, 3):
password += random.choice(string.lowercase)
password += random.choice(string.uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password, len(password)))
    def reset_password_deprecated(self,
                                  next=DEFAULT,
                                  onvalidation=DEFAULT,
                                  onaccept=DEFAULT,
                                  log=DEFAULT,
                                  ):
        """
        Returns a form to reset the user password (deprecated)

        Legacy flow: generates a brand-new random password, stores it on
        the record and emails it to the user in plain text. Superseded by
        request_reset_password/reset_password, which email a one-time
        link instead.
        """
        table_user = self.table_user()
        request = current.request
        response = current.response
        session = current.session
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.retrieve_password_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_password_onaccept
        if log is DEFAULT:
            log = self.messages['retrieve_password_log']
        # temporarily require a known email address on the email field
        old_requires = table_user.email.requires
        table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
                                              error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='retrieve_password', dbio=False,
                        onvalidation=onvalidation, hideerror=self.settings.hideerror):
            user = table_user(email=form.vars.email)
            if not user:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            elif user.registration_key in ('pending', 'disabled', 'blocked'):
                current.session.flash = \
                    self.messages.registration_pending
                redirect(self.url(args=request.args))
            password = self.random_password()
            passfield = self.settings.password_field
            d = {
                passfield: str(table_user[passfield].validate(password)[0]),
                'registration_key': ''
            }
            user.update_record(**d)
            if self.settings.mailer and \
               self.settings.mailer.send(to=form.vars.email,
                                         subject=self.messages.retrieve_password_subject,
                                         message=self.messages.retrieve_password % dict(password=password)):
                session.flash = self.messages.email_sent
            else:
                session.flash = self.messages.unable_to_send_email
            self.log_event(log, user)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        # restore the original validators on the email field
        table_user.email.requires = old_requires
        return form
    def reset_password(self,
                       next=DEFAULT,
                       onvalidation=DEFAULT,
                       onaccept=DEFAULT,
                       log=DEFAULT,
                       ):
        """
        Returns a form to reset the user password

        The reset key has the form "<unix-timestamp>-<uuid>" and is taken
        from request.vars.key (or from the session when
        prevent_password_reset_attacks is enabled); keys older than 24
        hours are rejected.  NOTE(review): onvalidation/onaccept/log are
        accepted for API symmetry but not used by this method.
        """
        table_user = self.table_user()
        request = current.request
        # response = current.response
        session = current.session
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.reset_password_next
        if self.settings.prevent_password_reset_attacks:
            # Move the key from the URL into the session, then redirect to a
            # clean URL so the key cannot leak via Referer headers or logs.
            key = request.vars.key
            if key:
                session._reset_password_key = key
                redirect(self.url(args='reset_password'))
            else:
                key = session._reset_password_key
        else:
            key = request.vars.key
        try:
            # Malformed keys, expired keys (>24h) and unknown keys all end
            # up in the same "invalid" branch below.
            t0 = int(key.split('-')[0])
            if time.time() - t0 > 60 * 60 * 24:
                raise Exception
            user = table_user(reset_password_key=key)
            if not user:
                raise Exception
        except Exception:
            session.flash = self.messages.invalid_reset_password
            redirect(next, client_side=self.settings.client_side)
        passfield = self.settings.password_field
        form = SQLFORM.factory(
            Field('new_password', 'password',
                  label=self.messages.new_password,
                  requires=self.table_user()[passfield].requires),
            Field('new_password2', 'password',
                  label=self.messages.verify_password,
                  requires=[IS_EXPR(
                      'value==%s' % repr(request.vars.new_password),
                      self.messages.mismatched_password)]),
            submit_button=self.messages.password_reset_button,
            hidden=dict(_next=next),
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.accepts(request, session,
                        hideerror=self.settings.hideerror):
            # Clearing registration_key also unblocks a pending/disabled
            # account; clearing reset_password_key makes the link one-shot.
            user.update_record(
                **{passfield: str(form.vars.new_password),
                   'registration_key': '',
                   'reset_password_key': ''})
            session.flash = self.messages.password_changed
            if self.settings.login_after_password_change:
                self.login_user(user)
            redirect(next, client_side=self.settings.client_side)
        return form
    def request_reset_password(self,
                               next=DEFAULT,
                               onvalidation=DEFAULT,
                               onaccept=DEFAULT,
                               log=DEFAULT,
                               ):
        """
        Returns a form to reset the user password

        Asks for the user's email (or username) and, when the account
        exists and is active, emails a reset link via
        email_reset_password().  Requires a configured mailer.
        """
        table_user = self.table_user()
        request = current.request
        response = current.response
        session = current.session
        # retrieve_password_captcha can be set explicitly; otherwise fall
        # back to the global captcha unless it was explicitly disabled.
        captcha = self.settings.retrieve_password_captcha or \
                (self.settings.retrieve_password_captcha != False and self.settings.captcha)

        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.request_reset_password_next
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if onvalidation is DEFAULT:
            onvalidation = self.settings.reset_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.reset_password_onaccept
        if log is DEFAULT:
            log = self.messages['reset_password_log']
        # NOTE(review): precedence here is
        # (login_userfield or 'username') if 'username' in fields else 'email'
        # so a custom login_userfield is ignored when the table has no
        # 'username' column -- confirm this is intended.
        userfield = self.settings.login_userfield or 'username' \
            if 'username' in table_user.fields else 'email'
        if userfield == 'email':
            table_user.email.requires = [
                IS_EMAIL(error_message=self.messages.invalid_email),
                IS_IN_DB(self.db, table_user.email,
                         error_message=self.messages.invalid_email)]
            if not self.settings.email_case_sensitive:
                table_user.email.requires.insert(0, IS_LOWER())
        else:
            table_user.username.requires = [
                IS_IN_DB(self.db, table_user.username,
                         error_message=self.messages.invalid_username)]
            if not self.settings.username_case_sensitive:
                table_user.username.requires.insert(0, IS_LOWER())
        form = SQLFORM(table_user,
                       fields=[userfield],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.password_reset_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment, self.settings.formstyle, 'captcha__row')
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='reset_password', dbio=False,
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            user = table_user(**{userfield:form.vars.get(userfield)})
            if not user:
                session.flash = self.messages['invalid_%s' % userfield]
                redirect(self.url(args=request.args),
                         client_side=self.settings.client_side)
            elif user.registration_key in ('pending', 'disabled', 'blocked'):
                session.flash = self.messages.registration_pending
                redirect(self.url(args=request.args),
                         client_side=self.settings.client_side)
            if self.email_reset_password(user):
                session.flash = self.messages.email_sent
            else:
                session.flash = self.messages.unable_to_send_email
            self.log_event(log, user)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next, client_side=self.settings.client_side)
        # old_requires = table_user.email.requires
        return form
def email_reset_password(self, user):
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('reset_password',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=self.messages.reset_password_subject,
message=self.messages.reset_password % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def retrieve_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
if self.settings.reset_password_requires_verification:
return self.request_reset_password(next, onvalidation, onaccept, log)
else:
return self.reset_password_deprecated(next, onvalidation, onaccept, log)
def change_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form that lets the user change password
"""
if not self.is_logged_in():
redirect(self.settings.login_url,
client_side=self.settings.client_side)
db = self.db
table_user = self.table_user()
s = db(table_user.id == self.user.id)
request = current.request
session = current.session
if next is DEFAULT:
next = self.get_vars_next() or self.settings.change_password_next
if onvalidation is DEFAULT:
onvalidation = self.settings.change_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.change_password_onaccept
if log is DEFAULT:
log = self.messages['change_password_log']
passfield = self.settings.password_field
requires = table_user[passfield].requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = filter(lambda t: isinstance(t, CRYPT), requires)
if requires:
requires[0].min_length = 0
form = SQLFORM.factory(
Field('old_password', 'password', requires=requires,
label=self.messages.old_password),
Field('new_password', 'password',
label=self.messages.new_password,
requires=table_user[passfield].requires),
Field('new_password2', 'password',
label=self.messages.verify_password,
requires=[IS_EXPR(
'value==%s' % repr(request.vars.new_password),
self.messages.mismatched_password)]),
submit_button=self.messages.password_change_button,
hidden=dict(_next=next),
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if form.accepts(request, session,
formname='change_password',
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first()
if not form.vars['old_password'] == current_user[passfield]:
form.errors['old_password'] = self.messages.invalid_password
else:
d = {passfield: str(form.vars.new_password)}
s.update(**d)
session.flash = self.messages.password_changed
self.log_event(log, self.user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
return form
    def profile(self,
                next=DEFAULT,
                onvalidation=DEFAULT,
                onaccept=DEFAULT,
                log=DEFAULT,
                ):
        """
        Returns a form that lets the user change his/her profile

        Requires a logged-in user.  The password field is made read-only;
        deleting the record (when allow_delete_accounts is on) logs the
        user out.
        """
        table_user = self.table_user()
        if not self.is_logged_in():
            redirect(self.settings.login_url,
                     client_side=self.settings.client_side)
        passfield = self.settings.password_field
        # the password cannot be edited from the profile form
        table_user[passfield].writable = False
        request = current.request
        session = current.session
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.profile_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.profile_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.profile_onaccept
        if log is DEFAULT:
            log = self.messages['profile_log']
        form = SQLFORM(
            table_user,
            self.user.id,
            fields=self.settings.profile_fields,
            hidden=dict(_next=next),
            showid=self.settings.showid,
            submit_button=self.messages.profile_save_button,
            delete_label=self.messages.delete_label,
            upload=self.settings.download_url,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator,
            deletable=self.settings.allow_delete_accounts,
            )
        if form.accepts(request, session,
                        formname='profile',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            # keep the in-session user record in sync with the new values
            self.user.update(table_user._filter_fields(form.vars))
            session.flash = self.messages.profile_updated
            self.log_event(log, self.user)
            callback(onaccept, form)
            if form.deleted:
                return self.logout()
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next, client_side=self.settings.client_side)
        return form
def run_login_onaccept(self):
onaccept = self.settings.login_onaccept
if onaccept:
form = Storage(dict(vars=self.user))
if not isinstance(onaccept, (list, tuple)):
onaccept = [onaccept]
for callback in onaccept:
callback(form)
def is_impersonating(self):
return self.is_logged_in() and 'impersonator' in current.session.auth
    def impersonate(self, user_id=DEFAULT):
        """
        To use this make a POST to
        `http://..../impersonate request.post_vars.user_id=<id>`
        Set request.post_vars.user_id to 0 to restore original user.

        requires impersonator is logged in and::

            has_permission('impersonate', 'auth_user', user_id)
        """
        request = current.request
        session = current.session
        auth = session.auth
        table_user = self.table_user()
        if not self.is_logged_in():
            raise HTTP(401, "Not Authorized")
        current_id = auth.user.id
        requested_id = user_id
        if user_id is DEFAULT:
            user_id = current.request.post_vars.user_id
        if user_id and user_id != self.user.id and user_id != '0':
            if not self.has_permission('impersonate',
                                       self.table_user(),
                                       user_id):
                raise HTTP(403, "Forbidden")
            user = table_user(user_id)
            if not user:
                raise HTTP(401, "Not Authorized")
            # Snapshot the whole impersonator session so it can be
            # restored verbatim when impersonation ends.
            auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
            auth.user.update(
                table_user._filter_fields(user, True))
            self.user = auth.user
            self.update_groups()
            log = self.messages['impersonate_log']
            self.log_event(log, dict(id=current_id, other_id=auth.user.id))
            self.run_login_onaccept()
        elif user_id in (0, '0'):
            # user_id == 0 ends impersonation: restore the saved session
            if self.is_impersonating():
                session.clear()
                session.update(pickle.loads(auth.impersonator))
                self.user = session.auth.user
            self.update_groups()
            self.run_login_onaccept()
            return None
        if requested_id is DEFAULT and not request.post_vars:
            # GET without arguments: render a form asking for the user_id
            return SQLFORM.factory(Field('user_id', 'integer'))
        # NOTE(review): `user` is only bound in the first branch above; a
        # POST with a falsy non-zero user_id would reach this line with
        # `user` undefined -- confirm callers can't hit that path.
        return SQLFORM(table_user, user.id, readonly=True)
def update_groups(self):
if not self.user:
return
user_groups = self.user_groups = {}
if current.session.auth:
current.session.auth.user_groups = self.user_groups
table_group = self.table_group()
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
for membership in memberships:
group = table_group(membership.group_id)
if group:
user_groups[membership.group_id] = group.role
def groups(self):
"""
Displays the groups and their roles for the logged in user
"""
if not self.is_logged_in():
redirect(self.settings.login_url)
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
table = TABLE()
for membership in memberships:
table_group = self.table_group()
groups = self.db(table_group.id == membership.group_id).select()
if groups:
group = groups[0]
table.append(TR(H3(group.role, '(%s)' % group.id)))
table.append(TR(P(group.description)))
if not memberships:
return None
return table
def not_authorized(self):
"""
You can change the view for this page to make it look as you like
"""
if current.request.ajax:
raise HTTP(403, 'ACCESS DENIED')
return self.messages.access_denied
    def requires(self, condition, requires_login=True, otherwise=None):
        """
        Decorator that prevents access to action if not logged in

        `condition` may be a boolean or a callable evaluated per request;
        when it is falsy the failed-authorization handler runs.  When the
        user is not logged in the behaviour depends on the request type:
        401 for ajax, `otherwise` (callable or URL) when given, 403 for
        basic/restful access, else a redirect to the login page.
        """
        def decorator(action):
            def f(*a, **b):
                # try HTTP basic auth first; fall back to the session user
                basic_allowed, basic_accepted, user = self.basic()
                user = user or self.user
                if requires_login:
                    if not user:
                        if current.request.ajax:
                            raise HTTP(401, self.messages.ajax_failed_authentication)
                        elif not otherwise is None:
                            if callable(otherwise):
                                return otherwise()
                            redirect(otherwise)
                        elif self.settings.allow_basic_login_only or \
                                basic_accepted or current.request.is_restful:
                            raise HTTP(403, "Not authorized")
                        else:
                            # remember where we came from so login can
                            # bounce back; urllib.quote is Python 2 API
                            next = self.here()
                            current.session.flash = current.response.flash
                            return call_or_redirect(
                                self.settings.on_failed_authentication,
                                self.settings.login_url +
                                '?_next=' + urllib.quote(next))
                if callable(condition):
                    flag = condition()
                else:
                    flag = condition
                if not flag:
                    current.session.flash = self.messages.access_denied
                    return call_or_redirect(
                        self.settings.on_failed_authorization)
                return action(*a, **b)
            # preserve the wrapped action's metadata for introspection
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f
        return decorator
def requires_login(self, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
return self.requires(True, otherwise=otherwise)
def requires_membership(self, role=None, group_id=None, otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
def has_membership(self=self, group_id=group_id, role=role):
return self.has_membership(group_id=group_id, role=role)
return self.requires(has_membership, otherwise=otherwise)
def requires_permission(self, name, table_name='', record_id=0,
otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of any group (role) that
has 'name' access to 'table_name', 'record_id'.
"""
def has_permission(self=self, name=name, table_name=table_name, record_id=record_id):
return self.has_permission(name, table_name, record_id)
return self.requires(has_permission, otherwise=otherwise)
    def requires_signature(self, otherwise=None, hash_vars=True):
        """
        Decorator that prevents access to action unless the request URL
        carries a valid digital signature (checked via URL.verify with
        user_signature=True); `hash_vars` controls whether the query
        variables take part in the signature check.
        """
        def verify():
            return URL.verify(current.request, user_signature=True, hash_vars=hash_vars)
        return self.requires(verify, otherwise)
def add_group(self, role, description=''):
"""
Creates a group associated to a role
"""
group_id = self.table_group().insert(
role=role, description=description)
self.log_event(self.messages['add_group_log'],
dict(group_id=group_id, role=role))
return group_id
def del_group(self, group_id):
"""
Deletes a group
"""
self.db(self.table_group().id == group_id).delete()
self.db(self.table_membership().group_id == group_id).delete()
self.db(self.table_permission().group_id == group_id).delete()
if group_id in self.user_groups: del self.user_groups[group_id]
self.log_event(self.messages.del_group_log, dict(group_id=group_id))
def id_group(self, role):
"""
Returns the group_id of the group specified by the role
"""
rows = self.db(self.table_group().role == role).select()
if not rows:
return None
return rows[0].id
def user_group(self, user_id=None):
"""
Returns the group_id of the group uniquely associated to this user
i.e. `role=user:[user_id]`
"""
return self.id_group(self.user_group_role(user_id))
def user_group_role(self, user_id=None):
if not self.settings.create_user_groups:
return None
if user_id:
user = self.table_user()[user_id]
else:
user = self.user
return self.settings.create_user_groups % user
def has_membership(self, group_id=None, user_id=None, role=None):
"""
Checks if user is member of group_id or role
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
if group_id and user_id and self.db((membership.user_id == user_id)
& (membership.group_id == group_id)).select():
r = True
else:
r = False
self.log_event(self.messages['has_membership_log'],
dict(user_id=user_id, group_id=group_id, check=r))
return r
def add_membership(self, group_id=None, user_id=None, role=None):
"""
Gives user_id membership of group_id or role
if user is None than user_id is that of current logged in user
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
record = membership(user_id=user_id, group_id=group_id)
if record:
return record.id
else:
id = membership.insert(group_id=group_id, user_id=user_id)
if role:
self.user_groups[group_id] = role
else:
self.update_groups()
self.log_event(self.messages['add_membership_log'],
dict(user_id=user_id, group_id=group_id))
return id
def del_membership(self, group_id=None, user_id=None, role=None):
"""
Revokes membership from group_id to user_id
if user_id is None than user_id is that of current logged in user
"""
group_id = group_id or self.id_group(role)
if not user_id and self.user:
user_id = self.user.id
membership = self.table_membership()
self.log_event(self.messages['del_membership_log'],
dict(user_id=user_id, group_id=group_id))
ret = self.db(membership.user_id
== user_id)(membership.group_id
== group_id).delete()
if group_id in self.user_groups: del self.user_groups[group_id]
return ret
    def has_permission(self,
                       name='any',
                       table_name='',
                       record_id=0,
                       user_id=None,
                       group_id=None,
                       ):
        """
        Checks if user_id or current logged in user is member of a group
        that has 'name' permission on 'table_name' and 'record_id'
        if group_id is passed, it checks whether the group has the permission

        A permission on record_id == 0 acts as a table-wide wildcard.
        """
        # the "everybody" group, when configured, grants permissions to
        # all users regardless of membership
        if not group_id and self.settings.everybody_group_id and \
                self.has_permission(
                    name, table_name, record_id, user_id=None,
                    group_id=self.settings.everybody_group_id):
            return True
        if not user_id and not group_id and self.user:
            user_id = self.user.id
        if user_id:
            # groups the user belongs to
            membership = self.table_membership()
            rows = self.db(membership.user_id
                           == user_id).select(membership.group_id)
            groups = set([row.group_id for row in rows])
            if group_id and not group_id in groups:
                return False
        else:
            groups = set([group_id])
        permission = self.table_permission()
        # groups that hold the requested permission on this exact record
        rows = self.db(permission.name == name)(permission.table_name
                                                == str(table_name))(permission.record_id
                                                                    == record_id).select(permission.group_id)
        groups_required = set([row.group_id for row in rows])
        if record_id:
            # also accept a table-wide grant (record_id == 0)
            rows = self.db(permission.name
                           == name)(permission.table_name
                                    == str(table_name))(permission.record_id
                                                        == 0).select(permission.group_id)
            groups_required = groups_required.union(set([row.group_id
                                                         for row in rows]))
        if groups.intersection(groups_required):
            r = True
        else:
            r = False
        if user_id:
            # only user-level checks are logged, not pure group checks
            self.log_event(self.messages['has_permission_log'],
                           dict(user_id=user_id, name=name,
                                table_name=table_name, record_id=record_id))
        return r
    def add_permission(self,
                       group_id,
                       name='any',
                       table_name='',
                       record_id=0,
                       ):
        """
        Gives group_id 'name' access to 'table_name' and 'record_id'

        group_id == 0 means the current user's own group.  Returns the id
        of the (possibly pre-existing) permission record.
        """
        permission = self.table_permission()
        if group_id == 0:
            group_id = self.user_group()
        # reuse an identical existing permission instead of duplicating it
        record = self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))(
            permission.record_id == long(record_id)).select(limitby=(0, 1), orderby_on_limitby=False).first()
        if record:
            id = record.id
        else:
            id = permission.insert(group_id=group_id, name=name,
                                   table_name=str(table_name),
                                   record_id=long(record_id))
        self.log_event(self.messages['add_permission_log'],
                       dict(permission_id=id, group_id=group_id,
                            name=name, table_name=table_name,
                            record_id=record_id))
        return id
    def del_permission(self,
                       group_id,
                       name='any',
                       table_name='',
                       record_id=0,
                       ):
        """
        Revokes group_id 'name' access to 'table_name' and 'record_id'

        Returns the number of deleted permission records.
        """
        permission = self.table_permission()
        self.log_event(self.messages['del_permission_log'],
                       dict(group_id=group_id, name=name,
                            table_name=table_name, record_id=record_id))
        return self.db(permission.group_id == group_id)(permission.name
                                                        == name)(permission.table_name
                                                                 == str(table_name))(permission.record_id
                                                                                     == long(record_id)).delete()
    def accessible_query(self, name, table, user_id=None):
        """
        Returns a query with all accessible records for user_id or
        the current logged in user
        this method does not work on GAE because uses JOIN and IN

        Example:
            Use as::

                db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)

        `table` may be a Table, a table name, or a Set/Query (in which
        case the check is chained across every table in the query).
        """
        if not user_id:
            user_id = self.user_id
        db = self.db
        if isinstance(table, str) and table in self.db.tables():
            table = self.db[table]
        elif isinstance(table, (Set, Query)):
            # experimental: build a chained query for all tables
            if isinstance(table, Set):
                cquery = table.query
            else:
                cquery = table
            tablenames = db._adapter.tables(cquery)
            for tablename in tablenames:
                cquery &= self.accessible_query(name, tablename,
                                                user_id=user_id)
            return cquery
        # table-wide permission: everything is accessible
        if not isinstance(table, str) and\
                self.has_permission(name, table, 0, user_id):
            return table.id > 0
        # otherwise restrict to record ids explicitly granted to one of
        # the user's groups
        membership = self.table_membership()
        permission = self.table_permission()
        query = table.id.belongs(
            db(membership.user_id == user_id)
            (membership.group_id == permission.group_id)
            (permission.name == name)
            (permission.table_name == table)
            ._select(permission.record_id))
        if self.settings.everybody_group_id:
            # records granted to the "everybody" group are also accessible
            query |= table.id.belongs(
                db(permission.group_id == self.settings.everybody_group_id)
                (permission.name == name)
                (permission.table_name == table)
                ._select(permission.record_id))
        return query
    @staticmethod
    def archive(form,
                archive_table=None,
                current_record='current_record',
                archive_current=False,
                fields=None):
        """
        If you have a table (db.mytable) that needs full revision history you
        can just do::

            form=crud.update(db.mytable,myrecord,onaccept=auth.archive)

        or::

            form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive)

        crud.archive will define a new table "mytable_archive" and store
        a copy of the current record (if archive_current=True)
        or a copy of the previous record (if archive_current=False)
        in the newly created table including a reference
        to the current record.

        fields allows to specify extra fields that need to be archived.

        If you want to access such table you need to define it yourself
        in a model::

            db.define_table('mytable_archive',
                            Field('current_record',db.mytable),
                            db.mytable)

        Notice such table includes all fields of db.mytable plus one: current_record.
        crud.archive does not timestamp the stored record unless your original table
        has a fields like::

            db.define_table(...,
                            Field('saved_on','datetime',
                                  default=request.now,update=request.now,writable=False),
                            Field('saved_by',auth.user,
                                  default=auth.user_id,update=auth.user_id,writable=False),

        there is nothing special about these fields since they are filled before
        the record is archived.

        If you want to change the archive table name and the name of the reference field
        you can do, for example::

            db.define_table('myhistory',
                            Field('parent_record',db.mytable),
                            db.mytable)

        and use it as::

            form=crud.update(db.mytable,myrecord,
                             onaccept=lambda form:crud.archive(form,
                                                               archive_table=db.myhistory,
                                                               current_record='parent_record'))

        Returns the id of the inserted archive row (None when there is
        nothing to archive).
        """
        # nothing to archive on a create (no previous record) unless the
        # caller asked to archive the current values
        if not archive_current and not form.record:
            return None
        table = form.table
        if not archive_table:
            # lazily define <table>_archive on first use; unique=False so
            # multiple archived revisions of a record can coexist
            archive_table_name = '%s_archive' % table
            if not archive_table_name in table._db:
                table._db.define_table(
                    archive_table_name,
                    Field(current_record, table),
                    *[field.clone(unique=False) for field in table])
            archive_table = table._db[archive_table_name]
        new_record = {current_record: form.vars.id}
        for fieldname in archive_table.fields:
            if not fieldname in ['id', current_record]:
                # archive either the submitted values or the previous ones
                if archive_current and fieldname in form.vars:
                    new_record[fieldname] = form.vars[fieldname]
                elif form.record and fieldname in form.record:
                    new_record[fieldname] = form.record[fieldname]
        if fields:
            new_record.update(fields)
        id = archive_table.insert(**new_record)
        return id
def wiki(self,
slug=None,
env=None,
render='markmin',
manage_permissions=False,
force_prefix='',
restrict_search=False,
resolve=True,
extra=None,
menu_groups=None,
templates=None,
migrate=True,
controller=None,
function=None,
force_render=False,
groups=None):
if controller and function:
resolve = False
if not hasattr(self, '_wiki'):
self._wiki = Wiki(self, render=render,
manage_permissions=manage_permissions,
force_prefix=force_prefix,
restrict_search=restrict_search,
env=env, extra=extra or {},
menu_groups=menu_groups,
templates=templates,
migrate=migrate,
controller=controller,
function=function,
groups=groups)
else:
self._wiki.env.update(env or {})
# if resolve is set to True, process request as wiki call
# resolve=False allows initial setup without wiki redirection
wiki = None
if resolve:
if slug:
wiki = self._wiki.read(slug, force_render)
if isinstance(wiki, dict) and wiki.has_key('content'): # FIXME: .has_key() is deprecated
# We don't want to return a dict object, just the wiki
wiki = wiki['content']
else:
wiki = self._wiki()
if isinstance(wiki, basestring):
wiki = XML(wiki)
return wiki
def wikimenu(self):
"""To be used in menu.py for app wide wiki menus"""
if (hasattr(self, "_wiki") and
self._wiki.settings.controller and
self._wiki.settings.function):
self._wiki.automenu()
class Crud(object):
def url(self, f=None, args=None, vars=None):
"""
This should point to the controller that exposes
download and crud
"""
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller, f=f, args=args, vars=vars)
    def __init__(self, environment, db=None, controller='default'):
        """
        Initialize the CRUD helper.

        `environment` is kept for backward compatibility (it may carry the
        DAL when `db` is not given); `controller` is the controller that
        exposes the crud actions and downloads.  Raises SyntaxError when
        no DAL can be determined.
        """
        self.db = db
        if not db and environment and isinstance(environment, DAL):
            self.db = environment
        elif not db:
            raise SyntaxError("must pass db as first or second argument")
        self.environment = current
        # per-instance settings; lock_keys prevents accidental typos when
        # assigning unknown setting names later
        settings = self.settings = Settings()
        settings.auth = None
        settings.logger = None
        settings.create_next = None
        settings.update_next = None
        settings.controller = controller
        settings.delete_next = self.url()
        settings.download_url = self.url('download')
        settings.create_onvalidation = StorageList()
        settings.update_onvalidation = StorageList()
        settings.delete_onvalidation = StorageList()
        settings.create_onaccept = StorageList()
        settings.update_onaccept = StorageList()
        settings.update_ondelete = StorageList()
        settings.delete_onaccept = StorageList()
        settings.update_deletable = True
        settings.showid = False
        settings.keepvalues = False
        settings.create_captcha = None
        settings.update_captcha = None
        settings.captcha = None
        settings.formstyle = 'table3cols'
        settings.label_separator = ': '
        settings.hideerror = False
        settings.detect_record_change = True
        settings.hmac_key = None
        settings.lock_keys = True
        # user-visible (translatable) strings
        messages = self.messages = Messages(current.T)
        messages.submit_button = 'Submit'
        messages.delete_label = 'Check to delete'
        messages.record_created = 'Record Created'
        messages.record_updated = 'Record Updated'
        messages.record_deleted = 'Record Deleted'

        messages.update_log = 'Record %(id)s updated'
        messages.create_log = 'Record %(id)s created'
        messages.read_log = 'Record %(id)s read'
        messages.delete_log = 'Record %(id)s deleted'

        messages.lock_keys = True
    def __call__(self):
        """
        Dispatch a crud request of the form
        <action>/<tablename>[/<record_id>] taken from request.args.
        Raises HTTP(404) for unknown actions or tables.
        """
        args = current.request.args
        if len(args) < 1:
            raise HTTP(404)
        elif args[0] == 'tables':
            return self.tables()
        elif len(args) > 1 and not args(1) in self.db.tables:
            raise HTTP(404)
        # NOTE(review): args(1) returns None when only the action was
        # given, so self.db[None] below would raise -- confirm one-arg
        # requests other than 'tables' are not expected here.
        table = self.db[args(1)]
        if args[0] == 'create':
            return self.create(table)
        elif args[0] == 'select':
            return self.select(table, linkto=self.url(args='read'))
        elif args[0] == 'search':
            form, rows = self.search(table, linkto=self.url(args='read'))
            return DIV(form, SQLTABLE(rows))
        elif args[0] == 'read':
            return self.read(table, args(2))
        elif args[0] == 'update':
            return self.update(table, args(2))
        elif args[0] == 'delete':
            return self.delete(table, args(2))
        else:
            raise HTTP(404)
def log_event(self, message, vars):
if self.settings.logger:
self.settings.logger.log_event(message, vars, origin='crud')
def has_permission(self, name, table, record=0):
if not self.settings.auth:
return True
try:
record_id = record.id
except:
record_id = record
return self.settings.auth.has_permission(name, str(table), record_id)
def tables(self):
return TABLE(*[TR(A(name,
_href=self.url(args=('select', name))))
for name in self.db.tables])
@staticmethod
def archive(form, archive_table=None, current_record='current_record'):
return Auth.archive(form, archive_table=archive_table,
current_record=current_record)
    def update(self,
               table,
               record,
               next=DEFAULT,
               onvalidation=DEFAULT,
               onaccept=DEFAULT,
               ondelete=DEFAULT,
               log=DEFAULT,
               message=DEFAULT,
               deletable=DEFAULT,
               formname=DEFAULT,
               **attributes
               ):
        """
        Render and process an update form for `record` of `table`
        (or a create form when `record` is None/0).

        Sets self.accepted / self.deleted accordingly.  DEFAULT arguments
        are resolved from the crud settings/messages.  Raises HTTP(404)
        for unknown tables or non-numeric record references; non-html
        requests get HTTP(200/401) responses instead of redirects.
        """
        if not (isinstance(table, Table) or table in self.db.tables) \
                or (isinstance(record, str) and not str(record).isdigit()):
            raise HTTP(404)
        if not isinstance(table, Table):
            table = self.db[table]
        try:
            record_id = record.id
        except:
            record_id = record or 0
        # update requires 'update' permission; creating (record_id == 0)
        # requires 'create' permission
        if record_id and not self.has_permission('update', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        if not record_id and not self.has_permission('create', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        request = current.request
        response = current.response
        session = current.session
        # restful clients may post the form content as a json payload
        if request.extension == 'json' and request.vars.json:
            request.vars.update(json_parser.loads(request.vars.json))
        if next is DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or self.settings.update_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.update_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.update_onaccept
        if ondelete is DEFAULT:
            ondelete = self.settings.update_ondelete
        if log is DEFAULT:
            log = self.messages['update_log']
        if deletable is DEFAULT:
            deletable = self.settings.update_deletable
        if message is DEFAULT:
            message = self.messages.record_updated
        if not 'hidden' in attributes:
            attributes['hidden'] = {}
        attributes['hidden']['_next'] = next
        form = SQLFORM(
            table,
            record,
            showid=self.settings.showid,
            submit_button=self.messages.submit_button,
            delete_label=self.messages.delete_label,
            deletable=deletable,
            upload=self.settings.download_url,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator,
            **attributes  # contains hidden
            )
        self.accepted = False
        self.deleted = False
        # pick the captcha matching the operation (update vs create)
        captcha = self.settings.update_captcha or self.settings.captcha
        if record and captcha:
            addrow(form, captcha.label, captcha, captcha.comment,
                   self.settings.formstyle, 'captcha__row')
        captcha = self.settings.create_captcha or self.settings.captcha
        if not record and captcha:
            addrow(form, captcha.label, captcha, captcha.comment,
                   self.settings.formstyle, 'captcha__row')
        # non-html (restful) requests skip session/CSRF handling
        if not request.extension in ('html', 'load'):
            (_session, _formname) = (None, None)
        else:
            (_session, _formname) = (
                session, '%s/%s' % (table._tablename, form.record_id))
        if not formname is DEFAULT:
            _formname = formname
        keepvalues = self.settings.keepvalues
        if request.vars.delete_this_record:
            keepvalues = False
        # per-table callbacks may be registered in a StorageList
        if isinstance(onvalidation, StorageList):
            onvalidation = onvalidation.get(table._tablename, [])
        if form.accepts(request, _session, formname=_formname,
                        onvalidation=onvalidation, keepvalues=keepvalues,
                        hideerror=self.settings.hideerror,
                        detect_record_change=self.settings.detect_record_change):
            self.accepted = True
            response.flash = message
            if log:
                self.log_event(log, form.vars)
            if request.vars.delete_this_record:
                self.deleted = True
                message = self.messages.record_deleted
                callback(ondelete, form, table._tablename)
            response.flash = message
            callback(onaccept, form, table._tablename)
            if not request.extension in ('html', 'load'):
                raise HTTP(200, 'RECORD CREATED/UPDATED')
            if isinstance(next, (list, tuple)):  # fix issue with 2.6
                next = next[0]
            if next:  # Only redirect when explicit
                next = replace_id(next, form)
                session.flash = response.flash
                redirect(next)
        elif not request.extension in ('html', 'load'):
            raise HTTP(401, serializers.json(dict(errors=form.errors)))
        return form
def create(self,
table,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
message=DEFAULT,
formname=DEFAULT,
**attributes
):
if next is DEFAULT:
next = self.settings.create_next
if onvalidation is DEFAULT:
onvalidation = self.settings.create_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.create_onaccept
if log is DEFAULT:
log = self.messages['create_log']
if message is DEFAULT:
message = self.messages.record_created
return self.update(
table,
None,
next=next,
onvalidation=onvalidation,
onaccept=onaccept,
log=log,
message=message,
deletable=False,
formname=formname,
**attributes
)
def read(self, table, record):
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('read', table, record):
redirect(self.settings.auth.settings.on_failed_authorization)
form = SQLFORM(
table,
record,
readonly=True,
comments=False,
upload=self.settings.download_url,
showid=self.settings.showid,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if not current.request.extension in ('html', 'load'):
return table._filter_fields(form.record, id=True)
return form
def delete(self,
table,
record_id,
next=DEFAULT,
message=DEFAULT,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('delete', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
request = current.request
session = current.session
if next is DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.delete_next
if message is DEFAULT:
message = self.messages.record_deleted
record = table[record_id]
if record:
callback(self.settings.delete_onvalidation, record)
del table[record_id]
callback(self.settings.delete_onaccept, record, table._tablename)
session.flash = message
redirect(next)
def rows(
self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not self.has_permission('select', table):
redirect(self.settings.auth.settings.on_failed_authorization)
#if record_id and not self.has_permission('select', table):
# redirect(self.settings.auth.settings.on_failed_authorization)
if not isinstance(table, Table):
table = self.db[table]
if not query:
query = table.id > 0
if not fields:
fields = [field for field in table if field.readable]
else:
fields = [table[f] if isinstance(f, str) else f for f in fields]
rows = self.db(query).select(*fields, **dict(orderby=orderby,
limitby=limitby))
return rows
def select(self,
table,
query=None,
fields=None,
orderby=None,
limitby=None,
headers=None,
**attr
):
headers = headers or {}
rows = self.rows(table, query, fields, orderby, limitby)
if not rows:
return None # Nicer than an empty table.
if not 'upload' in attr:
attr['upload'] = self.url('download')
if not current.request.extension in ('html', 'load'):
return rows.as_list()
if not headers:
if isinstance(table, str):
table = self.db[table]
headers = dict((str(k), k.label) for k in table)
return SQLTABLE(rows, headers=headers, **attr)
def get_format(self, field):
rtable = field._db[field.type[10:]]
format = rtable.get('_format', None)
if format and isinstance(format, str):
return format[2:-2]
return field.name
def get_query(self, field, op, value, refsearch=False):
try:
if refsearch:
format = self.get_format(field)
if op == 'equals':
if not refsearch:
return field == value
else:
return lambda row: row[field.name][format] == value
elif op == 'not equal':
if not refsearch:
return field != value
else:
return lambda row: row[field.name][format] != value
elif op == 'greater than':
if not refsearch:
return field > value
else:
return lambda row: row[field.name][format] > value
elif op == 'less than':
if not refsearch:
return field < value
else:
return lambda row: row[field.name][format] < value
elif op == 'starts with':
if not refsearch:
return field.like(value + '%')
else:
return lambda row: str(row[field.name][format]).startswith(value)
elif op == 'ends with':
if not refsearch:
return field.like('%' + value)
else:
return lambda row: str(row[field.name][format]).endswith(value)
elif op == 'contains':
if not refsearch:
return field.like('%' + value + '%')
else:
return lambda row: value in row[field.name][format]
except:
return None
    def search(self, *tables, **args):
        """
        Creates a search form and its results for a table.

        Only the first table in `tables` is searched.  Recognized
        keyword args: fields, field_labels, queries, query_labels,
        zero, query, validate, showall, chkall, plus select() options
        (orderby, groupby, left, distinct, limitby, cache).

        Returns a (form, results) tuple; results is None when the
        select fails and [] when nothing was selected.

        Examples:
            Use as::

                form, results = crud.search(db.test,
                    queries = ['equals', 'not equal', 'contains'],
                    query_labels={'equals':'Equals',
                                  'not equal':'Not equal'},
                    fields = ['id','children'],
                    field_labels = {
                        'id':'ID','children':'Children'},
                    zero='Please choose',
                    query = (db.test.id > 0)&(db.test.id != 3) )

        """
        table = tables[0]
        fields = args.get('fields', table.fields)
        validate = args.get('validate', True)
        request = current.request
        db = self.db
        if not (isinstance(table, Table) or table in db.tables):
            raise HTTP(404)
        # pass select() options straight through to the final query
        attributes = {}
        for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'):
            if key in args:
                attributes[key] = args[key]
        tbl = TABLE()
        selected = []
        refsearch = []  # row filters for 'reference ' fields (applied post-select)
        results = []
        showall = args.get('showall', False)
        if showall:
            selected = fields
        chkall = args.get('chkall', False)
        if chkall:
            # pre-tick every field's checkbox in the form
            for f in fields:
                request.vars['chk%s' % f] = 'on'
        ops = args.get('queries', [])
        zero = args.get('zero', '')
        if not ops:
            ops = ['equals', 'not equal', 'greater than',
                   'less than', 'starts with',
                   'ends with', 'contains']
        # NOTE: mutates a caller-supplied `queries` list in place
        ops.insert(0, zero)
        query_labels = args.get('query_labels', {})
        query = args.get('query', table.id > 0)
        field_labels = args.get('field_labels', {})
        # one form row per readable field: checkbox, label, operator, value
        for field in fields:
            field = table[field]
            if not field.readable:
                continue
            fieldname = field.name
            chkval = request.vars.get('chk' + fieldname, None)
            txtval = request.vars.get('txt' + fieldname, None)
            opval = request.vars.get('op' + fieldname, None)
            row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname,
                              _disabled=(field.type == 'id'),
                              value=(field.type == 'id' or chkval == 'on'))),
                     TD(field_labels.get(fieldname, field.label)),
                     TD(SELECT([OPTION(query_labels.get(op, op),
                                       _value=op) for op in ops],
                               _name="op" + fieldname,
                               value=opval)),
                     TD(INPUT(_type="text", _name="txt" + fieldname,
                              _value=txtval, _id='txt' + fieldname,
                              _class=str(field.type))))
            tbl.append(row)
            # on submit, fold each checked field's condition into the query
            if request.post_vars and (chkval or field.type == 'id'):
                if txtval and opval != '':
                    if field.type[0:10] == 'reference ':
                        refsearch.append(self.get_query(field, opval, txtval, refsearch=True))
                    elif validate:
                        value, error = field.validate(txtval)
                        if not error:
                            ### TODO deal with 'starts with', 'ends with', 'contains' on GAE
                            query &= self.get_query(field, opval, value)
                        else:
                            # show the validation error next to the text input
                            row[3].append(DIV(error, _class='error'))
                    else:
                        query &= self.get_query(field, opval, txtval)
                selected.append(field)
        form = FORM(tbl, INPUT(_type="submit"))
        if selected:
            try:
                results = db(query).select(*selected, **attributes)
                # reference-field conditions filter the selected rows
                for r in refsearch:
                    results = results.find(r)
            except:  # hmm, we should do better here
                results = None
        return form, results
# Install a global urllib2 opener with cookie handling so that urlopen()
# calls made by fetch() below process Set-Cookie headers.
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
def fetch(url, data=None, headers=None,
          cookie=None,
          user_agent='Mozilla/5.0'):
    """
    Fetches `url` and returns the response body.  Uses urllib2 on plain
    CPython and google.appengine.api.urlfetch when running on GAE (where
    redirects are followed by hand so cookies survive each hop).

    Args:
        url: the URL to fetch
        data: optional dict of POST data; a GET is issued when None
        headers: optional dict of extra HTTP headers
        cookie: optional Cookie.SimpleCookie sent with the request and,
            on GAE, updated from every response.  A fresh jar is created
            per call when omitted.
        user_agent: value for the User-agent header (set when truthy)
    """
    headers = headers or {}
    # FIX: the cookie jar used to be a mutable default argument, so one
    # shared jar leaked cookies across unrelated fetch() calls; build a
    # fresh one per call instead.
    if cookie is None:
        cookie = Cookie.SimpleCookie()
    if not data is None:
        data = urllib.urlencode(data)
    if user_agent:
        headers['User-agent'] = user_agent
    headers['Cookie'] = ' '.join(
        ['%s=%s;' % (c.key, c.value) for c in cookie.values()])
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        # plain CPython: single request through the global opener
        req = urllib2.Request(url, data, headers)
        html = urllib2.urlopen(req).read()
    else:
        method = ((data is None) and urlfetch.GET) or urlfetch.POST
        while url is not None:
            response = urlfetch.fetch(url=url, payload=data,
                                      method=method, headers=headers,
                                      allow_truncated=False, follow_redirects=False,
                                      deadline=10)
            # next request will be a get, so no need to send the data again
            data = None
            method = urlfetch.GET
            # load cookies from the response
            cookie.load(response.headers.get('set-cookie', ''))
            url = response.headers.get('location')
            html = response.content
    return html
# Extracts the <lat>/<lng> pair from the Google geocoding XML response;
# used by geocode() below.
regex_geocode = \
    re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""")
def geocode(address):
    """
    Resolves a street address to a (latitude, longitude) pair via the
    Google geocoding web service; returns (0.0, 0.0) on any failure.
    """
    try:
        quoted = urllib.quote(address)
        xml_text = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s'
                         % quoted)
        match = regex_geocode.search(xml_text)
        return (float(match.group('la')), float(match.group('lo')))
    except:
        return (0.0, 0.0)
def reverse_geocode(lat, lng, lang=None):
    """Try to get an approximate address for a given latitude, longitude."""
    if not lang:
        lang = current.T.accepted_language
    try:
        url = ('http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s'
               % dict(lat=lat, lng=lng, lang=lang))
        payload = json_parser.loads(fetch(url))
        return payload['results'][0]['formatted_address']
    except:
        return ''
def universal_caller(f, *a, **b):
    """
    Calls `f` with the given positional and named values, keeping only
    values that match arguments `f` actually declares (extras are
    silently dropped).  Arguments without defaults are required; raises
    HTTP(404) when they cannot all be satisfied.
    """
    # __code__/__defaults__ work on Python 2.6+ and 3.x
    # (func_code/func_defaults were 2.x-only spellings)
    c = f.__code__.co_argcount
    n = f.__code__.co_varnames[:c]
    defaults = f.__defaults__ or []
    # FIX: with no defaults, n[0:-0] evaluated to () so every declared
    # argument was (wrongly) classified as optional and a missing
    # required argument escaped as a TypeError instead of HTTP(404).
    if defaults:
        pos_args = n[0:-len(defaults)]      # required (no default)
        named_args = n[-len(defaults):]     # optional (have defaults)
    else:
        pos_args = n
        named_args = ()
    arg_dict = {}
    # Fill the arg_dict with name and value for the submitted, positional values
    for pos_index, pos_val in enumerate(a[:c]):
        arg_dict[n[pos_index]] = pos_val  # n[pos_index] is the name of the argument
    # There might be pos_args left, that are sent as named_values. Gather them as well.
    # If an argument already is populated with values we simply replace them.
    for arg_name in pos_args[len(arg_dict):]:
        if arg_name in b:
            arg_dict[arg_name] = b[arg_name]
    if len(arg_dict) >= len(pos_args):
        # All the required arguments are found. The function may now be called.
        # However, we need to update the arg_dict with the named arguments as well.
        for arg_name in named_args:
            if arg_name in b:
                arg_dict[arg_name] = b[arg_name]
        return f(**arg_dict)
    # Raise an error, the function cannot be called.
    raise HTTP(404, "Object does not exist")
class Service(object):
def __init__(self, environment=None):
self.run_procedures = {}
self.csv_procedures = {}
self.xml_procedures = {}
self.rss_procedures = {}
self.json_procedures = {}
self.jsonrpc_procedures = {}
self.jsonrpc2_procedures = {}
self.xmlrpc_procedures = {}
self.amfrpc_procedures = {}
self.amfrpc3_procedures = {}
self.soap_procedures = {}
def run(self, f):
"""
Example:
Use as::
service = Service()
@service.run
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/run/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def csv(self, f):
"""
Example:
Use as::
service = Service()
@service.csv
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/csv/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def xml(self, f):
"""
Example:
Use as::
service = Service()
@service.xml
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/xml/myfunction?a=3&b=4
"""
self.run_procedures[f.__name__] = f
return f
def rss(self, f):
"""
Example:
Use as::
service = Service()
@service.rss
def myfunction():
return dict(title=..., link=..., description=...,
created_on=..., entries=[dict(title=..., link=...,
description=..., created_on=...])
def call():
return service()
Then call it with:
wget http://..../app/default/call/rss/myfunction
"""
self.rss_procedures[f.__name__] = f
return f
def json(self, f):
"""
Example:
Use as::
service = Service()
@service.json
def myfunction(a, b):
return [{a: b}]
def call():
return service()
Then call it with:;
wget http://..../app/default/call/json/myfunction?a=hello&b=world
"""
self.json_procedures[f.__name__] = f
return f
def jsonrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world
"""
self.jsonrpc_procedures[f.__name__] = f
return f
def jsonrpc2(self, f):
"""
Example:
Use as::
service = Service()
@service.jsonrpc2
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget --post-data '{"jsonrpc": "2.0", "id": 1, "method": "myfunction", "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2
"""
self.jsonrpc2_procedures[f.__name__] = f
return f
def xmlrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.xmlrpc
def myfunction(a, b):
return a + b
def call():
return service()
The call it with:
wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world
"""
self.xmlrpc_procedures[f.__name__] = f
return f
def amfrpc(self, f):
"""
Example:
Use as::
service = Service()
@service.amfrpc
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world
"""
self.amfrpc_procedures[f.__name__] = f
return f
def amfrpc3(self, domain='default'):
"""
Example:
Use as::
service = Service()
@service.amfrpc3('domain')
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with:
wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world
"""
if not isinstance(domain, str):
raise SyntaxError("AMF3 requires a domain for function")
def _amfrpc3(f):
if domain:
self.amfrpc3_procedures[domain + '.' + f.__name__] = f
else:
self.amfrpc3_procedures[f.__name__] = f
return f
return _amfrpc3
def soap(self, name=None, returns=None, args=None, doc=None):
"""
Example:
Use as::
service = Service()
@service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,})
def myfunction(a, b):
return a + b
def call():
return service()
Then call it with::
from gluon.contrib.pysimplesoap.client import SoapClient
client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL")
response = client.MyFunction(a=1,b=2)
return response['result']
It also exposes online generated documentation and xml example messages
at `http://..../app/default/call/soap`
"""
def _soap(f):
self.soap_procedures[name or f.__name__] = f, returns, args, doc
return f
return _soap
def serve_run(self, args=None):
request = current.request
if not args:
args = request.args
if args and args[0] in self.run_procedures:
return str(universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars)))
self.error()
def serve_csv(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/x-csv'
if not args:
args = request.args
def none_exception(value):
if isinstance(value, unicode):
return value.encode('utf8')
if hasattr(value, 'isoformat'):
return value.isoformat()[:19].replace('T', ' ')
if value is None:
return '<NULL>'
return value
if args and args[0] in self.run_procedures:
import types
r = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
s = cStringIO.StringIO()
if hasattr(r, 'export_to_csv_file'):
r.export_to_csv_file(s)
elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)):
import csv
writer = csv.writer(s)
writer.writerow(r[0].keys())
for line in r:
writer.writerow([none_exception(v)
for v in line.values()])
else:
import csv
writer = csv.writer(s)
for line in r:
writer.writerow(line)
return s.getvalue()
self.error()
def serve_xml(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'text/xml'
if not args:
args = request.args
if args and args[0] in self.run_procedures:
s = universal_caller(self.run_procedures[args[0]],
*args[1:], **dict(request.vars))
if hasattr(s, 'as_list'):
s = s.as_list()
return serializers.xml(s, quote=False)
self.error()
def serve_rss(self, args=None):
request = current.request
response = current.response
if not args:
args = request.args
if args and args[0] in self.rss_procedures:
feed = universal_caller(self.rss_procedures[args[0]],
*args[1:], **dict(request.vars))
else:
self.error()
response.headers['Content-Type'] = 'application/rss+xml'
return serializers.rss(feed)
def serve_json(self, args=None):
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
if not args:
args = request.args
d = dict(request.vars)
if args and args[0] in self.json_procedures:
s = universal_caller(self.json_procedures[args[0]], *args[1:], **d)
if hasattr(s, 'as_list'):
s = s.as_list()
return response.json(s)
self.error()
class JsonRpcException(Exception):
def __init__(self, code, info):
jrpc_error = Service.jsonrpc_errors.get(code)
if jrpc_error:
self.message, self.description = jrpc_error
self.code, self.info = code, info
# jsonrpc 2.0 error types. records the following structure {code: (message,meaning)}
jsonrpc_errors = {
-32700: ("Parse error. Invalid JSON was received by the server.", "An error occurred on the server while parsing the JSON text."),
-32600: ("Invalid Request", "The JSON sent is not a valid Request object."),
-32601: ("Method not found", "The method does not exist / is not available."),
-32602: ("Invalid params", "Invalid method parameter(s)."),
-32603: ("Internal error", "Internal JSON-RPC error."),
-32099: ("Server error", "Reserved for implementation-defined server-errors.")}
def serve_jsonrpc(self):
def return_response(id, result):
return serializers.json({'version': '1.1',
'id': id, 'result': result, 'error': None})
def return_error(id, code, message, data=None):
error = {'name': 'JSONRPCError',
'code': code, 'message': message}
if data is not None:
error['data'] = data
return serializers.json({'id': id,
'version': '1.1',
'error': error,
})
request = current.request
response = current.response
response.headers['Content-Type'] = 'application/json; charset=utf-8'
methods = self.jsonrpc_procedures
data = json_parser.loads(request.body.read())
jsonrpc_2 = data.get('jsonrpc')
if jsonrpc_2: #hand over to version 2 of the protocol
return self.serve_jsonrpc2(data)
id, method, params = data.get('id'), data.get('method'), data.get('params', [])
if id is None:
return return_error(0, 100, 'missing id')
if not method in methods:
return return_error(id, 100, 'method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
return return_response(id, s)
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
message = '%s: %s' % (etype.__name__, eval)
data = request.is_local and traceback.format_tb(etb)
logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb)))
return return_error(id, 100, message, data)
def serve_jsonrpc2(self, data=None, batch_element=False):
def return_response(id, result):
if not must_respond:
return None
return serializers.json({'jsonrpc': '2.0',
'id': id, 'result': result})
def return_error(id, code, message=None, data=None):
error = {'code': code}
if Service.jsonrpc_errors.has_key(code):
error['message'] = Service.jsonrpc_errors[code][0]
error['data'] = Service.jsonrpc_errors[code][1]
if message is not None:
error['message'] = message
if data is not None:
error['data'] = data
return serializers.json({'jsonrpc': '2.0',
'id': id,
'error': error})
def validate(data):
"""
Validate request as defined in: http://www.jsonrpc.org/specification#request_object.
Args:
data(str): The json object.
Returns:
- True -- if successful
- False -- if no error should be reported (i.e. data is missing 'id' member)
Raises:
JsonRPCException
"""
iparms = set(data.keys())
mandatory_args = set(['jsonrpc', 'method'])
missing_args = mandatory_args - iparms
if missing_args:
raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args))
if data['jsonrpc'] != '2.0':
raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc'])
if 'id' not in iparms:
return False
return True
request = current.request
response = current.response
if not data:
response.headers['Content-Type'] = 'application/json; charset=utf-8'
try:
data = json_parser.loads(request.body.read())
except ValueError: # decoding error in json lib
return return_error(None, -32700)
# Batch handling
if isinstance(data, list) and not batch_element:
retlist = []
for c in data:
retstr = self.serve_jsonrpc2(c, batch_element=True)
if retstr: # do not add empty responses
retlist.append(retstr)
if len(retlist) == 0: # return nothing
return ''
else:
return "[" + ','.join(retlist) + "]"
methods = self.jsonrpc2_procedures
methods.update(self.jsonrpc_procedures)
try:
must_respond = validate(data)
except Service.JsonRpcException, e:
return return_error(None, e.code, e.info)
id, method, params = data.get('id'), data['method'], data.get('params', '')
if not method in methods:
return return_error(id, -32601, data='Method "%s" does not exist' % method)
try:
if isinstance(params, dict):
s = methods[method](**params)
else:
s = methods[method](*params)
if hasattr(s, 'as_list'):
s = s.as_list()
if must_respond:
return return_response(id, s)
else:
return ''
except HTTP, e:
raise e
except Service.JsonRpcException, e:
return return_error(id, e.code, e.info)
except:
etype, eval, etb = sys.exc_info()
data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb))
logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb)))
return return_error(id, -32099, data=data)
def serve_xmlrpc(self):
request = current.request
response = current.response
services = self.xmlrpc_procedures.values()
return response.xmlrpc(request, services)
def serve_amfrpc(self, version=0):
try:
import pyamf
import pyamf.remoting.gateway
except:
return "pyamf not installed or not in Python sys.path"
request = current.request
response = current.response
if version == 3:
services = self.amfrpc3_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
pyamf_request = pyamf.remoting.decode(request.body)
else:
services = self.amfrpc_procedures
base_gateway = pyamf.remoting.gateway.BaseGateway(services)
context = pyamf.get_context(pyamf.AMF0)
pyamf_request = pyamf.remoting.decode(request.body, context)
pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)
for name, message in pyamf_request:
pyamf_response[name] = base_gateway.getProcessor(message)(message)
response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
if version == 3:
return pyamf.remoting.encode(pyamf_response).getvalue()
else:
return pyamf.remoting.encode(pyamf_response, context).getvalue()
def serve_soap(self, version="1.1"):
try:
from gluon.contrib.pysimplesoap.server import SoapDispatcher
except:
return "pysimplesoap not installed in contrib"
request = current.request
response = current.response
procedures = self.soap_procedures
location = "%s://%s%s" % (
request.env.wsgi_url_scheme,
request.env.http_host,
URL(r=request, f="call/soap", vars={}))
namespace = 'namespace' in response and response.namespace or location
documentation = response.description or ''
dispatcher = SoapDispatcher(
name=response.title,
location=location,
action=location, # SOAPAction
namespace=namespace,
prefix='pys',
documentation=documentation,
ns=True)
for method, (function, returns, args, doc) in procedures.iteritems():
dispatcher.register_function(method, function, returns, args, doc)
if request.env.request_method == 'POST':
fault = {}
# Process normal Soap Operation
response.headers['Content-Type'] = 'text/xml'
xml = dispatcher.dispatch(request.body.read(), fault=fault)
if fault:
# May want to consider populating a ticket here...
response.status = 500
# return the soap response
return xml
elif 'WSDL' in request.vars:
# Return Web Service Description
response.headers['Content-Type'] = 'text/xml'
return dispatcher.wsdl()
elif 'op' in request.vars:
# Return method help webpage
response.headers['Content-Type'] = 'text/html'
method = request.vars['op']
sample_req_xml, sample_res_xml, doc = dispatcher.help(method)
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
A("See all webservice operations",
_href=URL(r=request, f="call/soap", vars={})),
H2(method),
P(doc),
UL(LI("Location: %s" % dispatcher.location),
LI("Namespace: %s" % dispatcher.namespace),
LI("SoapAction: %s" % dispatcher.action),
),
H3("Sample SOAP XML Request Message:"),
CODE(sample_req_xml, language="xml"),
H3("Sample SOAP XML Response Message:"),
CODE(sample_res_xml, language="xml"),
]
return {'body': body}
else:
# Return general help and method list webpage
response.headers['Content-Type'] = 'text/html'
body = [H1("Welcome to Web2Py SOAP webservice gateway"),
P(response.description),
P("The following operations are available"),
A("See WSDL for webservice description",
_href=URL(r=request, f="call/soap", vars={"WSDL":None})),
UL([LI(A("%s: %s" % (method, doc or ''),
_href=URL(r=request, f="call/soap", vars={'op': method})))
for method, doc in dispatcher.list_methods()]),
]
return {'body': body}
def __call__(self):
"""
Registers services with::
service = Service()
@service.run
@service.rss
@service.json
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
@service.amfrpc3('domain')
@service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,})
Exposes services with::
def call():
return service()
You can call services with::
http://..../app/default/call/run?[parameters]
http://..../app/default/call/rss?[parameters]
http://..../app/default/call/json?[parameters]
http://..../app/default/call/jsonrpc
http://..../app/default/call/xmlrpc
http://..../app/default/call/amfrpc
http://..../app/default/call/amfrpc3
http://..../app/default/call/soap
"""
request = current.request
if len(request.args) < 1:
raise HTTP(404, "Not Found")
arg0 = request.args(0)
if arg0 == 'run':
return self.serve_run(request.args[1:])
elif arg0 == 'rss':
return self.serve_rss(request.args[1:])
elif arg0 == 'csv':
return self.serve_csv(request.args[1:])
elif arg0 == 'xml':
return self.serve_xml(request.args[1:])
elif arg0 == 'json':
return self.serve_json(request.args[1:])
elif arg0 == 'jsonrpc':
return self.serve_jsonrpc()
elif arg0 == 'jsonrpc2':
return self.serve_jsonrpc2()
elif arg0 == 'xmlrpc':
return self.serve_xmlrpc()
elif arg0 == 'amfrpc':
return self.serve_amfrpc()
elif arg0 == 'amfrpc3':
return self.serve_amfrpc(3)
elif arg0 == 'soap':
return self.serve_soap()
else:
self.error()
def error(self):
raise HTTP(404, "Object does not exist")
def completion(callback):
    """
    Executes a task on completion of the called action.

    Example:
        Use as::

            from gluon.tools import completion
            @completion(lambda d: logging.info(repr(d)))
            def index():
                return dict(message='hello')

    It logs the output of the function every time input is called.
    The argument of completion is executed in a new thread.
    """
    def decorator(action):
        def wrapper(*args, **kwargs):
            result = None
            try:
                result = action(*args, **kwargs)
                return result
            finally:
                # fire-and-forget: run the callback in its own thread with
                # the action's result (None when the action raised)
                thread.start_new_thread(callback, (result,))
        return wrapper
    return decorator
def prettydate(d, T=lambda x: x):
    """
    Converts a date/datetime into a human-friendly relative-time string
    such as '3 days ago' or '1 month from now'.  Returns '' for falsy
    input and '[invalid date]' for any other non-date value.  `T` is a
    translator applied to the phrase before interpolation.
    """
    # datetime must be tested before date: datetime subclasses date
    if isinstance(d, datetime.datetime):
        interval = datetime.datetime.now() - d
    elif isinstance(d, datetime.date):
        interval = datetime.date.today() - d
    elif not d:
        return ''
    else:
        return '[invalid date]'
    if interval.days < 0:
        # future timestamp: flip the interval and the wording
        interval, suffix = -interval, ' from now'
    else:
        suffix = ' ago'
    days, seconds = interval.days, interval.seconds
    if days >= 2 * 365:
        return T('%d years' + suffix) % int(days / 365)
    if days >= 365:
        return T('1 year' + suffix)
    if days >= 60:
        return T('%d months' + suffix) % int(days / 30)
    if days > 21:
        return T('1 month' + suffix)
    if days >= 14:
        return T('%d weeks' + suffix) % int(days / 7)
    if days >= 7:
        return T('1 week' + suffix)
    if days > 1:
        return T('%d days' + suffix) % days
    if days == 1:
        return T('1 day' + suffix)
    if seconds >= 2 * 60 * 60:
        return T('%d hours' + suffix) % int(seconds / 3600)
    if seconds >= 60 * 60:
        return T('1 hour' + suffix)
    if seconds >= 2 * 60:
        return T('%d minutes' + suffix) % int(seconds / 60)
    if seconds >= 60:
        return T('1 minute' + suffix)
    if seconds > 1:
        return T('%d seconds' + suffix) % seconds
    if seconds == 1:
        return T('1 second' + suffix)
    return T('now')
def test_thread_separation():
    """
    Doctest helper for PluginManager: demonstrates that each thread gets
    its own PluginManager singleton.  A second thread stores x=7 in its
    own instance while the main thread's instance keeps x=5, which is
    what this function returns.
    """
    def f():
        # runs in the second thread, so PluginManager() yields a
        # different singleton instance than the main thread's
        c = PluginManager()
        lock1.acquire()
        lock2.acquire()
        c.x = 7
        lock1.release()
        lock2.release()
    lock1 = thread.allocate_lock()
    lock2 = thread.allocate_lock()
    lock1.acquire()
    thread.start_new_thread(f, ())
    a = PluginManager()
    a.x = 5
    lock1.release()  # let the worker thread write its own x
    lock2.acquire()  # wait until the worker has finished
    return a.x
class PluginManager(object):
    """
    Plugin Manager is similar to a storage object but it is a single level
    singleton. This means that multiple instances within the same thread
    share the same attributes.
    Its constructor is also special. The first argument is the name of the
    plugin you are defining.
    The named arguments are parameters needed by the plugin with default values.
    If the parameters were previous defined, the old values are used.

    Example:
        in some general configuration file::

            plugins = PluginManager()
            plugins.me.param1=3

        within the plugin model::

            _ = PluginManager('me',param1=5,param2=6,param3=7)

        where the plugin is used::

            >>> print plugins.me.param1
            3
            >>> print plugins.me.param2
            6
            >>> plugins.me.param3 = 8
            >>> print plugins.me.param3
            8

        Here are some tests::

            >>> a=PluginManager()
            >>> a.x=6
            >>> b=PluginManager('check')
            >>> print b.x
            6
            >>> b=PluginManager() # reset settings
            >>> print b.x
            <Storage {}>
            >>> b.x=7
            >>> print a.x
            7
            >>> a.y.z=8
            >>> print b.y.z
            8
            >>> test_thread_separation()
            5
            >>> plugins=PluginManager('me',db='mydb')
            >>> print plugins.me.db
            mydb
            >>> print 'me' in plugins
            True
            >>> print plugins.me.installed
            True

    """
    # one singleton instance per thread id
    instances = {}
    def __new__(cls, *a, **b):
        id = thread.get_ident()
        lock = thread.allocate_lock()
        try:
            lock.acquire()
            try:
                # reuse this thread's existing instance when present
                return cls.instances[id]
            except KeyError:
                instance = object.__new__(cls, *a, **b)
                cls.instances[id] = instance
                return instance
        finally:
            lock.release()
    def __init__(self, plugin=None, **defaults):
        # called on every PluginManager(...) even when __new__ returned
        # an existing instance; with no plugin name, reset all settings
        if not plugin:
            self.__dict__.clear()
        settings = self.__getattr__(plugin)
        settings.installed = True
        # keep previously-set values; fill in only the missing defaults
        settings.update(
            (k, v) for k, v in defaults.items() if not k in settings)
    def __getattr__(self, key):
        # auto-create an empty Storage for unknown plugin names
        if not key in self.__dict__:
            self.__dict__[key] = Storage()
        return self.__dict__[key]
    def keys(self):
        # names of the plugins configured on this instance
        return self.__dict__.keys()
    def __contains__(self, key):
        return key in self.__dict__
class Expose(object):
    def __init__(self, base=None, basename=None, extensions=None, allow_download=True):
        """
        Serves a directory listing (folders, files, optional README) for
        the request's args under `base`, or streams a selected file.

        Examples:
            Use as::

                def static():
                    return dict(files=Expose())

            or::

                def static():
                    path = os.path.join(request.folder,'static','public')
                    return dict(files=Expose(path,basename='public'))

        Args:
            extensions: an optional list of file extensions for filtering
                displayed files: e.g. `['.py', '.jpg']`
            allow_download: whether to allow downloading selected files
        """
        current.session.forget()
        base = base or os.path.join(current.request.folder, 'static')
        basename = basename or current.request.function
        self.basename = basename
        if current.request.raw_args:
            self.args = [arg for arg in current.request.raw_args.split('/') if arg]
        else:
            self.args = [arg for arg in current.request.args if arg]
        filename = os.path.join(base, *self.args)
        if not os.path.exists(filename):
            raise HTTP(404, "FILE NOT FOUND")
        # SECURITY(fix): compare against the normalized base plus a path
        # separator; a plain startswith(base) also accepted sibling
        # directories such as base + '_private'.
        normbase = os.path.normpath(base)
        normfile = os.path.normpath(filename)
        if not (normfile == normbase or
                normfile.startswith(normbase + os.sep)):
            raise HTTP(401, "NOT AUTHORIZED")
        if allow_download and not os.path.isdir(filename):
            current.response.headers['Content-Type'] = contenttype(filename)
            raise HTTP(200, open(filename, 'rb'), **current.response.headers)
        self.path = path = os.path.join(filename, '*')
        # scan the directory once and split entries into folders/files
        entries = sorted(glob.glob(path))
        self.folders = [f[len(path) - 1:] for f in entries
                        if os.path.isdir(f) and not self.isprivate(f)]
        self.filenames = [f[len(path) - 1:] for f in entries
                          if not os.path.isdir(f) and not self.isprivate(f)]
        if 'README' in self.filenames:
            # close the file handle promptly instead of leaking it
            readme_file = open(os.path.join(filename, 'README'))
            try:
                readme = readme_file.read()
            finally:
                readme_file.close()
            self.paragraph = MARKMIN(readme)
        else:
            self.paragraph = None
        if extensions:
            self.filenames = [f for f in self.filenames
                              if os.path.splitext(f)[-1] in extensions]

    def breadcrumbs(self, basename):
        """Returns a SPAN of links for each path component in self.args."""
        path = []
        span = SPAN()
        span.append(A(basename, _href=URL()))
        for arg in self.args:
            span.append('/')
            path.append(arg)
            span.append(A(arg, _href=URL(args='/'.join(path))))
        return span

    def table_folders(self):
        """Returns a TABLE of links to the sub-folders ('' when none)."""
        if self.folders:
            return SPAN(H3('Folders'), TABLE(
                *[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
                  for folder in self.folders],
                **dict(_class="table")))
        return ''

    @staticmethod
    def isprivate(f):
        # hide anything containing 'private', dotfiles and editor backups
        return 'private' in f or f.startswith('.') or f.endswith('~')

    @staticmethod
    def isimage(f):
        return os.path.splitext(f)[-1].lower() in (
            '.png', '.jpg', '.jpeg', '.gif', '.tiff')

    def table_files(self, width=160):
        """Returns a TABLE of file links with thumbnails for images."""
        if self.filenames:
            return SPAN(H3('Files'),
                        TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
                                   TD(IMG(_src=URL(args=self.args + [f]),
                                          _style='max-width:%spx' % width)
                                      if width and self.isimage(f) else ''))
                                for f in self.filenames],
                              **dict(_class="table")))
        return ''

    def xml(self):
        """Renders breadcrumbs, README paragraph, folders and files."""
        return DIV(
            H2(self.breadcrumbs(self.basename)),
            self.paragraph or '',
            self.table_folders(),
            self.table_files()).xml()
class Wiki(object):
    """Database-backed wiki engine exposed through web2py's Auth."""
    # group name that grants access to everyone when used in can_read/can_edit
    everybody = 'everybody'
    # page size for the '_recent' listing
    rows_page = 25
    def markmin_base(self, body):
        """Render MARKMIN *body* to an HTML string, expanding wiki autolinks."""
        return MARKMIN(body, extra=self.settings.extra,
                       url=True, environment=self.env,
                       autolinks=lambda link: expand_one(link, {})).xml()
def render_tags(self, tags):
return DIV(
_class='w2p_wiki_tags',
*[A(t.strip(), _href=URL(args='_search', vars=dict(q=t)))
for t in tags or [] if t.strip()])
def markmin_render(self, page):
return self.markmin_base(page.body) + self.render_tags(page.tags).xml()
    def html_render(self, page):
        """Render an HTML-engine page: expand links/components, append tags."""
        html = page.body
        # @///function -> http://..../function
        html = replace_at_urls(html, URL)
        # http://...jpg -> <img src="http://...jpg/> or embed
        html = replace_autolinks(html, lambda link: expand_one(link, {}))
        # @{component:name} -> <script>embed component name</script>
        html = replace_components(html, self.env)
        html = html + self.render_tags(page.tags).xml()
        return html
@staticmethod
def component(text):
"""
In wiki docs allows `@{component:controller/function/args}`
which renders as a `LOAD(..., ajax=True)`
"""
items = text.split('/')
controller, function, args = items[0], items[1], items[2:]
return LOAD(controller, function, args=args, ajax=True).xml()
    def get_renderer(self):
        """
        Resolve settings.render into a callable that renders a page.

        Accepts a string naming a built-in engine ("markmin"/"html"), a
        custom callable, or a dict of per-record engines (see __init__).
        """
        # NOTE: `basestring` — this file targets Python 2 (or a compat shim).
        if isinstance(self.settings.render, basestring):
            r = getattr(self, "%s_render" % self.settings.render)
        elif callable(self.settings.render):
            r = self.settings.render
        elif isinstance(self.settings.render, dict):
            # per-record engine: the page row's `render` field picks the
            # renderer, falling back to "<name>_render" methods, then markmin
            def custom_render(page):
                if page.render:
                    if page.render in self.settings.render.keys():
                        my_render = self.settings.render[page.render]
                    else:
                        my_render = getattr(self, "%s_render" % page.render)
                else:
                    my_render = self.markmin_render
                return my_render(page)
            r = custom_render
        else:
            raise ValueError(
                "Invalid render type %s" % type(self.settings.render))
        return r
    def __init__(self, auth, env=None, render='markmin',
                 manage_permissions=False, force_prefix='',
                 restrict_search=False, extra=None,
                 menu_groups=None, templates=None, migrate=True,
                 controller=None, function=None, groups=None):
        settings = self.settings = auth.settings.wiki
        # NOTE(review): the string below is a misplaced docstring (it follows
        # a statement, so it is a no-op expression); kept for its content.
        """
        Args:
            render:
                - "markmin"
                - "html"
                - `<function>` : Sets a custom render function
                - `dict(html=<function>, markmin=...)`: dict(...) allows
                  multiple custom render functions
                - "multiple" : Is the same as `{}`. It enables per-record
                  formats using builtins
        """
        # collect the set of selectable render engines; show_engine exposes
        # the per-page `render` field only in multi-engine mode
        engines = set(['markmin', 'html'])
        show_engine = False
        if render == "multiple":
            render = {}
        if isinstance(render, dict):
            [engines.add(key) for key in render]
            show_engine = True
        settings.render = render
        perms = settings.manage_permissions = manage_permissions
        settings.force_prefix = force_prefix
        settings.restrict_search = restrict_search
        settings.extra = extra or {}
        settings.menu_groups = menu_groups
        settings.templates = templates
        settings.controller = controller
        settings.function = function
        settings.groups = auth.user_groups.values() \
            if groups is None else groups
        db = auth.db
        self.env = env or {}
        self.env['component'] = Wiki.component
        self.auth = auth
        self.wiki_menu_items = None
        # force_prefix may contain %-placeholders filled from the user record
        if self.auth.user:
            self.settings.force_prefix = force_prefix % self.auth.user
        else:
            self.settings.force_prefix = force_prefix
        self.host = current.request.env.http_host
        # schema for the three wiki tables: pages, tags and media uploads
        table_definitions = [
            ('wiki_page', {
                'args': [
                    Field('slug',
                          requires=[IS_SLUG(),
                                    IS_NOT_IN_DB(db, 'wiki_page.slug')],
                          writable=False),
                    Field('title', length=255, unique=True),
                    Field('body', 'text', notnull=True),
                    Field('tags', 'list:string'),
                    Field('can_read', 'list:string',
                          writable=perms,
                          readable=perms,
                          default=[Wiki.everybody]),
                    Field('can_edit', 'list:string',
                          writable=perms, readable=perms,
                          default=[Wiki.everybody]),
                    Field('changelog'),
                    Field('html', 'text',
                          compute=self.get_renderer(),
                          readable=False, writable=False),
                    Field('render', default="markmin",
                          readable=show_engine,
                          writable=show_engine,
                          requires=IS_EMPTY_OR(
                              IS_IN_SET(engines))),
                    auth.signature],
                'vars': {'format': '%(title)s', 'migrate': migrate}}),
            ('wiki_tag', {
                'args': [
                    Field('name'),
                    Field('wiki_page', 'reference wiki_page'),
                    auth.signature],
                'vars':{'format': '%(title)s', 'migrate': migrate}}),
            ('wiki_media', {
                'args': [
                    Field('wiki_page', 'reference wiki_page'),
                    Field('title', required=True),
                    Field('filename', 'upload', required=True),
                    auth.signature],
                'vars': {'format': '%(title)s', 'migrate': migrate}}),
        ]
        # define only non-existent tables
        for key, value in table_definitions:
            args = []
            if not key in db.tables():
                # look for wiki_ extra fields in auth.settings
                extra_fields = auth.settings.extra_fields
                if extra_fields:
                    if key in extra_fields:
                        if extra_fields[key]:
                            for field in extra_fields[key]:
                                args.append(field)
                args += value['args']
                db.define_table(key, *args, **value['vars'])
        # default template query: public pages tagged 'template'
        if self.settings.templates is None and not \
           self.settings.manage_permissions:
            self.settings.templates = db.wiki_page.tags.contains('template') & \
                db.wiki_page.can_read.contains('everybody')
        # keep the wiki_tag table in sync with each page's tags field
        def update_tags_insert(page, id, db=db):
            for tag in page.tags or []:
                tag = tag.strip().lower()
                if tag:
                    db.wiki_tag.insert(name=tag, wiki_page=id)
        def update_tags_update(dbset, page, db=db):
            # re-read the row so computed/normalized values are used
            page = dbset.select(limitby=(0, 1)).first()
            db(db.wiki_tag.wiki_page == page.id).delete()
            for tag in page.tags or []:
                tag = tag.strip().lower()
                if tag:
                    db.wiki_tag.insert(name=tag, wiki_page=page.id)
        db.wiki_page._after_insert.append(update_tags_insert)
        db.wiki_page._after_update.append(update_tags_update)
        # auto-enroll locally-authenticated users into 'wiki_editor' when the
        # group list was not customized by the caller
        if (auth.user and
                check_credentials(current.request, gae_login=False) and
                not 'wiki_editor' in auth.user_groups.values() and
                self.settings.groups == auth.user_groups.values()):
            group = db.auth_group(role='wiki_editor')
            gid = group.id if group else db.auth_group.insert(
                role='wiki_editor')
            auth.add_membership(gid)
        settings.lock_keys = True
# WIKI ACCESS POLICY
    def not_authorized(self, page=None):
        """Default access-denied handler: abort the request with HTTP 401."""
        raise HTTP(401)
def can_read(self, page):
if 'everybody' in page.can_read or not \
self.settings.manage_permissions:
return True
elif self.auth.user:
groups = self.settings.groups
if ('wiki_editor' in groups or
set(groups).intersection(set(page.can_read + page.can_edit)) or
page.created_by == self.auth.user.id):
return True
return False
def can_edit(self, page=None):
if not self.auth.user:
redirect(self.auth.settings.login_url)
groups = self.settings.groups
return ('wiki_editor' in groups or
(page is None and 'wiki_author' in groups) or
not page is None and (
set(groups).intersection(set(page.can_edit)) or
page.created_by == self.auth.user.id))
def can_manage(self):
if not self.auth.user:
return False
groups = self.settings.groups
return 'wiki_editor' in groups
    def can_search(self):
        """Search is unconditionally allowed (subclasses may override)."""
        return True
def can_see_menu(self):
if self.auth.user:
if self.settings.menu_groups is None:
return True
else:
groups = self.settings.groups
if any(t in self.settings.menu_groups for t in groups):
return True
return False
### END POLICY
    def automenu(self):
        """adds the menu if not present"""
        # build once per instance, and only when a controller/function pair
        # is configured; appends to the global response menu
        if (not self.wiki_menu_items and
                self.settings.controller and
                self.settings.function):
            self.wiki_menu_items = self.menu(self.settings.controller,
                                             self.settings.function)
            current.response.menu += self.wiki_menu_items
    def __call__(self):
        """
        Front controller: dispatch on the first request arg.

        Numeric arg -> media download; plain slug -> read; '_verb' args
        route to edit/editmedia/create/pages/search/recent/cloud/preview.
        """
        request = current.request
        settings = self.settings
        settings.controller = settings.controller or request.controller
        settings.function = settings.function or request.function
        self.automenu()
        zero = request.args(0) or 'index'
        if zero and zero.isdigit():
            return self.media(int(zero))
        elif not zero or not zero.startswith('_'):
            return self.read(zero)
        elif zero == '_edit':
            return self.edit(request.args(1) or 'index', request.args(2) or 0)
        elif zero == '_editmedia':
            return self.editmedia(request.args(1) or 'index')
        elif zero == '_create':
            return self.create()
        elif zero == '_pages':
            return self.pages()
        elif zero == '_search':
            return self.search()
        elif zero == '_recent':
            # paginated list of pages created by the user in args(1)
            ipage = int(request.vars.page or 0)
            query = self.auth.db.wiki_page.created_by == request.args(
                1, cast=int)
            return self.search(query=query,
                               orderby=~self.auth.db.wiki_page.created_on,
                               limitby=(ipage * self.rows_page,
                                        (ipage + 1) * self.rows_page),
                               )
        elif zero == '_cloud':
            return self.cloud()
        elif zero == '_preview':
            return self.preview(self.get_renderer())
def first_paragraph(self, page):
if not self.can_read(page):
mm = (page.body or '').replace('\r', '')
ps = [p for p in mm.split('\n\n')
if not p.startswith('#') and p.strip()]
if ps:
return ps[0]
return ''
def fix_hostname(self, body):
return (body or '').replace('://HOSTNAME', '://%s' % self.host)
def read(self, slug, force_render=False):
if slug in '_cloud':
return self.cloud()
elif slug in '_search':
return self.search()
page = self.auth.db.wiki_page(slug=slug)
if page and (not self.can_read(page)):
return self.not_authorized(page)
if current.request.extension == 'html':
if not page:
url = URL(args=('_create', slug))
return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn"))
else:
html = page.html if not force_render else self.get_renderer()(page)
content = XML(self.fix_hostname(html))
return dict(title=page.title,
slug=page.slug,
page=page,
content=content,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
elif current.request.extension == 'load':
return self.fix_hostname(page.html) if page else ''
else:
if not page:
raise HTTP(404)
else:
return dict(title=page.title,
slug=page.slug,
page=page,
content=page.body,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
    def edit(self, slug, from_template=0):
        """
        Show (and process) the edit form for *slug*; creates defaults for a
        new page, optionally copying the body from a template page id.
        Returns the form plus the client-side preview/media script.
        """
        auth = self.auth
        db = auth.db
        page = db.wiki_page(slug=slug)
        if not self.can_edit(page):
            return self.not_authorized(page)
        title_guess = ' '.join(c.capitalize() for c in slug.split('-'))
        if not page:
            # new page: non-managers must use the enforced slug prefix
            if not (self.can_manage() or
                    slug.startswith(self.settings.force_prefix)):
                current.session.flash = 'slug must have "%s" prefix' \
                    % self.settings.force_prefix
                redirect(URL(args=('_create')))
            db.wiki_page.can_read.default = [Wiki.everybody]
            db.wiki_page.can_edit.default = [auth.user_group_role()]
            db.wiki_page.title.default = title_guess
            db.wiki_page.slug.default = slug
            if slug == 'wiki-menu':
                db.wiki_page.body.default = \
                    '- Menu Item > @////index\n- - Submenu > http://web2py.com'
            else:
                db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \
                    if int(from_template) > 0 else '## %s\n\npage content' % title_guess
        vars = current.request.post_vars
        if vars.body:
            # store host-independent URLs; read() reverses this
            vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME')
        form = SQLFORM(db.wiki_page, page, deletable=True,
                       formstyle='table2cols', showid=False).process()
        if form.deleted:
            current.session.flash = 'page deleted'
            redirect(URL())
        elif form.accepted:
            current.session.flash = 'page created'
            redirect(URL(args=slug))
        # client-side toggle between source editing, live preview and media
        script = """
        jQuery(function() {
            if (!jQuery('#wiki_page_body').length) return;
            var pagecontent = jQuery('#wiki_page_body');
            pagecontent.css('font-family',
                            'Monaco,Menlo,Consolas,"Courier New",monospace');
            var prevbutton = jQuery('<button class="btn nopreview">Preview</button>');
            var preview = jQuery('<div id="preview"></div>').hide();
            var previewmedia = jQuery('<div id="previewmedia"></div>');
            var form = pagecontent.closest('form');
            preview.insertBefore(form);
            prevbutton.insertBefore(form);
            if(%(link_media)s) {
              var mediabutton = jQuery('<button class="btn nopreview">Media</button>');
              mediabutton.insertBefore(form);
              previewmedia.insertBefore(form);
              mediabutton.click(function() {
                if (mediabutton.hasClass('nopreview')) {
                  web2py_component('%(urlmedia)s', 'previewmedia');
                } else {
                  previewmedia.empty();
                }
                mediabutton.toggleClass('nopreview');
              });
            }
            prevbutton.click(function(e) {
                e.preventDefault();
                if (prevbutton.hasClass('nopreview')) {
                    prevbutton.addClass('preview').removeClass(
                        'nopreview').html('Edit Source');
                    try{var wiki_render = jQuery('#wiki_page_render').val()}
                    catch(e){var wiki_render = null;}
                    web2py_ajax_page('post', \
                        '%(url)s', {body: jQuery('#wiki_page_body').val(), \
                        render: wiki_render}, 'preview');
                    form.fadeOut('fast', function() {preview.fadeIn()});
                } else {
                    prevbutton.addClass(
                        'nopreview').removeClass('preview').html('Preview');
                    preview.fadeOut('fast', function() {form.fadeIn()});
                }
            })
        })
        """ % dict(url=URL(args=('_preview', slug)), link_media=('true' if page else 'false'),
                   urlmedia=URL(extension='load',
                                args=('_editmedia', slug),
                                vars=dict(embedded=1)))
        return dict(content=TAG[''](form, SCRIPT(script)))
    def editmedia(self, slug):
        """
        Grid of media attachments for the page at *slug*.  In embedded mode
        (inside the editor) each row gets a 'copy into source' link that
        pastes the media @-reference into the page body.
        """
        auth = self.auth
        db = auth.db
        page = db.wiki_page(slug=slug)
        if not (page and self.can_edit(page)):
            return self.not_authorized(page)
        # render each media id as its @-link reference for pasting
        self.auth.db.wiki_media.id.represent = lambda id, row: \
            id if not row.filename else \
            SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1]))
        self.auth.db.wiki_media.wiki_page.default = page.id
        self.auth.db.wiki_media.wiki_page.writable = False
        links = []
        csv = True
        create = True
        if current.request.vars.embedded:
            # inside the editor: strip grid extras, add the paste link
            script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;"
            fragment = self.auth.db.wiki_media.id.represent
            csv = False
            create = False
            links= [
                lambda row:
                A('copy into source', _href='#', _onclick=script % (fragment(row.id, row)))
            ]
        content = SQLFORM.grid(
            self.auth.db.wiki_media.wiki_page == page.id,
            orderby=self.auth.db.wiki_media.title,
            links=links,
            csv=csv,
            create=create,
            args=['_editmedia', slug],
            user_signature=False)
        return dict(content=content)
    def create(self):
        """
        Form asking for a new page slug (and optional template page);
        on success redirects into the edit view for that slug.
        """
        if not self.can_edit():
            return self.not_authorized()
        db = self.auth.db
        # NOTE(review): `slugs`/`options` are built but never used below —
        # looks like leftover code; confirm before removing.
        slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id, db.wiki_page.slug)
        options = [OPTION(row.slug, _value=row.id) for row in slugs]
        options.insert(0, OPTION('', _value=''))
        fields = [Field("slug", default=current.request.args(1) or
                        self.settings.force_prefix,
                        requires=(IS_SLUG(), IS_NOT_IN_DB(db, db.wiki_page.slug))),]
        if self.settings.templates:
            fields.append(
                Field("from_template", "reference wiki_page",
                      requires=IS_EMPTY_OR(
                          IS_IN_DB(db(self.settings.templates),
                                   db.wiki_page._id,
                                   '%(slug)s')),
                      comment=current.T(
                          "Choose Template or empty for new Page")))
        form = SQLFORM.factory(*fields, **dict(_class="well"))
        form.element("[type=submit]").attributes["_value"] = \
            current.T("Create Page from Slug")
        if form.process().accepted:
            form.vars.from_template = 0 if not form.vars.from_template \
                else form.vars.from_template
            redirect(URL(args=('_edit', form.vars.slug, form.vars.from_template or 0)))  # added param
        return dict(content=form)
    def pages(self):
        """Admin grid of all wiki pages with edit/media shortcut links."""
        if not self.can_manage():
            return self.not_authorized()
        # show the slug as its @-link form; link titles to the page view
        self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN(
            '@////%s' % slug)
        self.auth.db.wiki_page.title.represent = lambda title, row: \
            A(title, _href=URL(args=row.slug))
        wiki_table = self.auth.db.wiki_page
        content = SQLFORM.grid(
            wiki_table,
            fields=[wiki_table.slug,
                    wiki_table.title, wiki_table.tags,
                    wiki_table.can_read, wiki_table.can_edit],
            links=[
                lambda row:
                A('edit', _href=URL(args=('_edit', row.slug)), _class='btn'),
                lambda row:
                A('media', _href=URL(args=('_editmedia', row.slug)), _class='btn')],
            details=False, editable=False, deletable=False, create=False,
            orderby=self.auth.db.wiki_page.title,
            args=['_pages'],
            user_signature=False)
        return dict(content=content)
    def media(self, id):
        """
        Stream the media upload with the given *id*, enforcing the owning
        page's read permission when permission management is enabled.
        """
        request, response, db = current.request, current.response, self.auth.db
        media = db.wiki_media(id)
        if media:
            if self.settings.manage_permissions:
                page = db.wiki_page(media.wiki_page)
                if not self.can_read(page):
                    return self.not_authorized(page)
            # hand the stored filename to the standard download handler
            request.args = [media.filename]
            m = response.download(request, db)
            current.session.forget()  # get rid of the cookie
            # media is immutable content: mark it cacheable by the browser
            response.headers['Last-Modified'] = \
                request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
            if 'Content-Disposition' in response.headers:
                del response.headers['Content-Disposition']
            response.headers['Pragma'] = 'cache'
            response.headers['Cache-Control'] = 'private'
            return m
        else:
            raise HTTP(404)
    def menu(self, controller='default', function='index'):
        """
        Build the response menu from the special 'wiki-menu' page plus the
        standard [Wiki] submenu (view/edit/media/create/manage/search).

        The menu page body uses lines like '- Title > link'; nesting depth
        is given by the number of leading dashes.
        """
        db = self.auth.db
        request = current.request
        menu_page = db.wiki_page(slug='wiki-menu')
        menu = []
        if menu_page:
            tree = {'': menu}
            # NOTE(review): pattern is not a raw string; '\s', '\w' etc. pass
            # through unchanged here but raw-string form would be safer.
            regex = re.compile('[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')
            for match in regex.finditer(self.fix_hostname(menu_page.body)):
                base = match.group('base').replace(' ', '')
                title = match.group('title')
                link = match.group('link')
                title_page = None
                if link.startswith('@'):
                    # @-link: app/controller/function/args shorthand
                    items = link[2:].split('/')
                    if len(items) > 3:
                        title_page = items[3]
                    link = URL(a=items[0] or None, c=items[1] or controller,
                               f=items[2] or function, args=items[3:])
                # attach under the parent one dash shallower
                parent = tree.get(base[1:], tree[''])
                subtree = []
                tree[base] = subtree
                parent.append((current.T(title),
                               request.args(0) == title_page,
                               link, subtree))
        if self.can_see_menu():
            submenu = []
            menu.append((current.T('[Wiki]'), None, None, submenu))
            if URL() == URL(controller, function):
                # contextual items depend on whether we are viewing/editing
                if not str(request.args(0)).startswith('_'):
                    slug = request.args(0) or 'index'
                    mode = 1
                elif request.args(0) == '_edit':
                    slug = request.args(1) or 'index'
                    mode = 2
                elif request.args(0) == '_editmedia':
                    slug = request.args(1) or 'index'
                    mode = 3
                else:
                    mode = 0
                if mode in (2, 3):
                    submenu.append((current.T('View Page'), None,
                                    URL(controller, function, args=slug)))
                if mode in (1, 3):
                    submenu.append((current.T('Edit Page'), None,
                                    URL(controller, function, args=('_edit', slug))))
                if mode in (1, 2):
                    submenu.append((current.T('Edit Page Media'), None,
                                    URL(controller, function, args=('_editmedia', slug))))
            submenu.append((current.T('Create New Page'), None,
                            URL(controller, function, args=('_create'))))
            # Moved next if to inside self.auth.user check
            if self.can_manage():
                submenu.append((current.T('Manage Pages'), None,
                                URL(controller, function, args=('_pages'))))
                submenu.append((current.T('Edit Menu'), None,
                                URL(controller, function, args=('_edit', 'wiki-menu'))))
            # Also moved inside self.auth.user check
            submenu.append((current.T('Search Pages'), None,
                            URL(controller, function, args=('_search'))))
        return menu
def search(self, tags=None, query=None, cloud=True, preview=True,
limitby=(0, 100), orderby=None):
if not self.can_search():
return self.not_authorized()
request = current.request
content = CAT()
if tags is None and query is None:
form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
value=request.vars.q),
INPUT(_type="submit", _value=current.T('Search')),
_method='GET')
content.append(DIV(form, _class='w2p_wiki_form'))
if request.vars.q:
tags = [v.strip() for v in request.vars.q.split(',')]
tags = [v.lower() for v in tags if v]
if tags or not query is None:
db = self.auth.db
count = db.wiki_tag.wiki_page.count()
fields = [db.wiki_page.id, db.wiki_page.slug,
db.wiki_page.title, db.wiki_page.tags,
db.wiki_page.can_read]
if preview:
fields.append(db.wiki_page.body)
if query is None:
query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
(db.wiki_tag.name.belongs(tags))
query = query | db.wiki_page.title.contains(request.vars.q)
if self.settings.restrict_search and not self.manage():
query = query & (db.wiki_page.created_by == self.auth.user_id)
pages = db(query).select(count,
*fields, **dict(orderby=orderby or ~count,
groupby=reduce(lambda a, b: a | b, fields),
distinct=True,
limitby=limitby))
if request.extension in ('html', 'load'):
if not pages:
content.append(DIV(current.T("No results"),
_class='w2p_wiki_form'))
def link(t):
return A(t, _href=URL(args='_search', vars=dict(q=t)))
items = [DIV(H3(A(p.wiki_page.title, _href=URL(
args=p.wiki_page.slug))),
MARKMIN(self.first_paragraph(p.wiki_page))
if preview else '',
DIV(_class='w2p_wiki_tags',
*[link(t.strip()) for t in
p.wiki_page.tags or [] if t.strip()]),
_class='w2p_wiki_search_item')
for p in pages]
content.append(DIV(_class='w2p_wiki_pages', *items))
else:
cloud = False
content = [p.wiki_page.as_dict() for p in pages]
elif cloud:
content.append(self.cloud()['content'])
if request.extension == 'load':
return content
return dict(content=content)
    def cloud(self):
        """
        Tag cloud of the 20 most-used tags, font-sized by how many pages
        carry each tag.
        """
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count(distinct=True)
        ids = db(db.wiki_tag).select(
            db.wiki_tag.name, count,
            distinct=True,
            groupby=db.wiki_tag.name,
            orderby=~count, limitby=(0, 20))
        if ids:
            # a = highest count, b = lowest (rows are ordered descending)
            a, b = ids[0](count), ids[-1](count)
            def style(c):
                # scale font-size linearly between 1.3em and 2.8em
                STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem'
                size = (1.5 * (c - b) / max(a - b, 1) + 1.3)
                return STYLE % (1.3, size)
            items = []
            for item in ids:
                items.append(A(item.wiki_tag.name,
                               _style=style(item(count)),
                               _href=URL(args='_search',
                                         vars=dict(q=item.wiki_tag.name))))
                items.append(' ')
            return dict(content=DIV(_class='w2p_cloud', *items))
def preview(self, render):
request = current.request
# FIXME: This is an ugly hack to ensure a default render
# engine if not specified (with multiple render engines)
if not "render" in request.post_vars:
request.post_vars.render = None
return render(request.post_vars)
class Config(object):
    """
    Thin wrapper around ConfigParser bound to one file and one section.

    Fixes over the original: no mutable default argument, the config file
    is closed even when writing fails (context manager), and the bare
    `except:` is narrowed to `except Exception` so KeyboardInterrupt /
    SystemExit are not swallowed.
    """
    def __init__(
        self,
        filename,
        section,
        default_values=None
    ):
        # `default_values or {}` avoids sharing one dict across instances
        self.config = ConfigParser.ConfigParser(default_values or {})
        self.config.read(filename)
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.section = section
        self.filename = filename

    def read(self):
        """Return the section's settings, preferring an unsaved session copy."""
        session_key = 'settings_%s' % self.section
        if isinstance(current.session[session_key], dict):
            settings = current.session[session_key]
        else:
            settings = dict(self.config.items(self.section))
        return settings

    def save(self, options):
        """
        Set each (option, value) pair and write the file.

        Returns True on success; on failure the pending values are parked
        in the session so read() still sees them, and False is returned.
        """
        for option, value in options:
            self.config.set(self.section, option, value)
        try:
            # context manager guarantees the handle is closed on error too
            with open(self.filename, 'w') as config_file:
                self.config.write(config_file)
            result = True
        except Exception:
            current.session['settings_%s' % self.section] = dict(self.config.items(self.section))
            result = False
        return result
if __name__ == '__main__':
    # run the module's embedded doctests when executed directly
    import doctest
    doctest.testmod()
| gluon/tools.py | 250,936 | !/bin/python -*- coding: utf-8 -*- try stdlib (Python 2.6) try external module fallback to pure-Python module mind there are two loggers here (logger and crud.settings.logger)! seconds We don't want to use base64 encoding for unicode mail encoded or raw text Use multipart/mixed if there is attachments no encoding configuration for raw messages No charset passed to avoid transport encoding NOTE: some unicode encoded strings will produce unreadable mail contents. Construct mime part only if needed We have text and html we need multipart/alternative If there is attachments put text and html into multipart/mixed No attachments no multipart/mixed CIPHER GPGME Set GNUPGHOME environment variable to set home of gnupg need a python-pyme package and gpgme lib sign search for signing key for From: make a signature make it part of the email insert the origin payload insert the detached signature it's just a trick to handle the no encryption case encrypt collect the public keys for encryption make the encryption make it a part of the email X.509 crypt certfiles could be a string or a list need m2crypto SIGN key for signing Recreate coz sign() has consumed it. ENCRYPT make an encryption cert's stack Final stage in sign and encryption no cryptography process as usual for local testing: In case we get an error code, store it so we can get an error message from the /api/challenge URL as described in the reCAPTCHA api docs.use Google's ajax interface, needed for LOADed components this should only be used for catcha and perhaps not even for that one hour one month these are messages that can be customized next two lines for backward compatibility if we have auth info if not expired it, used it if expired, clear the session else, only clear auth info in the session this is a trick to speed up sessions to avoid many writes what happens after login? what happens after registration? 
these are messages that can be customized for "remember me" option when user wants to be logged in for longer Hold all menu items in a list The final User is logged in User is not logged in For inclusion in MENU Default web2py scaffolding Define custom modes. THIS IS NOT LAZY user unknown log messages should not be translated make a guess about who this user is if we think we found the user but registration_id does not match, make new user THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER? added for register_bare to avoid overwriting users user not in database try other login methods get or create did not create a user (it ignores duplicate records) If ticket is a service Ticket and RENEW flag respected assume version 2 Allow up to 4 attempts (the 1st one plus 3 more) use session for federated login pass important for security defaultSetup the default field used for the form If two-factor authentication is enabled, and the maximum number of tries allowed is used up, reset the session to pre-login state with two-factor auth Exceeded maximum allowed tries for this code. Require user to enter username and password again. Redirect to the default 'next' page without logging in. If that page requires login, user will be redirected back to the main login form Before showing the default login form, check whether we are already on the second step of two-step authentication. If we are, then skip this login form and use the form for the second challenge instead. Note to devs: The code inside the if-block is unchanged from the previous version of this file, other than for indentation inside to put it inside the if-block do we use our own login form, or from a central source? 
check for username in db if '@' in username check for email, not username user in db, check if registration pending or disabled try alternate logins 1st as these have the current version of the password do not store password in db alternates have failed, maybe because service inaccessible try logging in locally using cached credentials success user not in db we're allowed to auto-register users from external systems do not store password in db invalid login use a central authentication server we need to pass through login again before going on Extra login logic for two-factor authentication If the 'user' variable has a value, this means that the first authentication step was successful (i.e. user provided correct username and password at the first challenge). Check if this user is signed up for two-factor authentication Default rule is that the user must be part of a group that is called auth.settings.two_factor_authentication_group challenge accepted_form is used by some default web2py code later in the function that handles running specified functions before redirect Set it to False until the challenge form is accepted. Handle the case when a user has submitted the login/password form successfully, and the password has been validated, but the two-factor form has not been displayed or validated yet. store the validated user and associate with this session Allow user to try up to 4 times TODO: Add some error checking to handle cases where email cannot be sent Handle the case when the two-factor form has been successfully validated and the user was previously stored (the current user should be None because in this case, the previous username/password login form should not be displayed. This will allow the code after the 2-factor authentication block to proceed as normal. For security, because the username stored in the session somehow does not match the just validated user. Should not be possible without session stealing which is hard with SSL. 
Either way, the user and code associated with this session should be removed. This handles cases where the session login may have expired but browser window is open, so the old session key and session usernamem will still exist TODO: Limit the number of retries allowed. End login logic for two-factor authentication process authenticated users process authenticated users user wants to be logged in for longer how to continue Clear out 2-step authentication information if user logs out. This information is also cleared on successful login. Ensure the username field is unique.Add a message if specified make sure session has same user.registrato_key as db record response = current.response old_requires = table_user.email.requires interpret group_id as a role interpret group_id as a role experimental: build a chained query for all tables if resolve is set to True, process request as wiki call resolve=False allows initial setup without wiki redirection FIXME: .has_key() is deprecated We don't want to return a dict object, just the wiki contains hidden fix issue with 2.6 Only redirect when explicitif record_id and not self.has_permission('select', table): redirect(self.settings.auth.settings.on_failed_authorization) Nicer than an empty table. TODO deal with 'starts with', 'ends with', 'contains' on GAE hmmm, we should do better here next request will be a get, so no need to send the data again load cookies from the response Fill the arg_dict with name and value for the submitted, positional values n[pos_index] is the name of the argument There might be pos_args left, that are sent as named_values. Gather them as well. If a argument already is populated with values we simply replaces them. All the positional arguments is found. The function may now be called. However, we need to update the arg_dict with the values from the named arguments as well. Raise an error, the function cannot be called. jsonrpc 2.0 error types. 
records the following structure {code: (message,meaning)}hand over to version 2 of the protocol decoding error in json lib Batch handling do not add empty responses return nothing SOAPAction Process normal Soap Operation May want to consider populating a ticket here... return the soap response Return Web Service Description Return method help webpage Return general help and method list webpage @///function -> http://..../function http://...jpg -> <img src="http://...jpg/> or embed @{component:name} -> <script>embed component name</script> define only non-existent tables look for wiki_ extra fields in auth.settings WIKI ACCESS POLICY END POLICY added param get rid of the cookie Moved next if to inside self.auth.user check Also moved inside self.auth.user check FIXME: This is an ugly hack to ensure a default render engine if not specified (with multiple render engines) | 8,611 | en | 0.860975 |
"""
WSGI config for car_selling_parts project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is created;
# setdefault lets a deployment override it via the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'car_selling_parts.settings')
# Module-level WSGI callable imported by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| car_selling_parts/wsgi.py | 411 | WSGI config for car_selling_parts project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ | 223 | en | 0.76024 |
'''
UnrealCV
========
Provides functions to interact with games built using Unreal Engine.
>>> import unrealcv
>>> (HOST, PORT) = ('localhost', 9000)
>>> client = unrealcv.Client((HOST, PORT))
'''
import sys, ctypes, struct, threading, socket, re, time, logging
try:
    from Queue import Queue  # Python 2
except ImportError:
    # FIX: was a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt; only an ImportError is expected here.
    from queue import Queue  # for Python 3

# Module logger. The handler list is reset so repeated imports/reloads do not
# stack duplicate handlers.
_L = logging.getLogger(__name__)
# _L.addHandler(logging.NullHandler()) # Let client to decide how to do logging
_L.handlers = []
h = logging.StreamHandler()
h.setFormatter(logging.Formatter('%(levelname)s:%(module)s:%(lineno)d:%(message)s'))
_L.addHandler(h)
_L.propagate = False
_L.setLevel(logging.INFO)

# struct format string for the protocol's 32-bit unsigned header fields
# (magic number and payload size).
fmt = 'I'
class SocketMessage(object):
    '''
    Length-prefixed message framing for the UnrealCV TCP protocol.

    Wire format (similar to FNFSMessageHeader in UnrealEngine4, but without
    the CRC check): a uint32 magic number, a uint32 payload size, then the
    raw payload bytes.
    The magic number is from the Unreal implementation, see
    https://github.com/EpicGames/UnrealEngine/blob/dff3c48be101bb9f84633a733ef79c91c38d9542/Engine/Source/Runtime/Sockets/Public/NetworkMessage.h
    '''
    # Magic constant taken verbatim from the Unreal implementation.
    magic = ctypes.c_uint32(0x9E2B83C1).value

    def __init__(self, payload):
        self.magic = SocketMessage.magic
        self.payload_size = ctypes.c_uint32(len(payload)).value

    @classmethod
    def ReceivePayload(cls, socket):
        '''
        Read one framed message from a blocking socket.
        Return only the payload bytes, not the raw message; None on
        disconnect or malformed data.
        '''
        rbufsize = 0  # unbuffered reader (from SocketServer.py)
        rfile = socket.makefile('rb', rbufsize)
        try:
            _L.debug('read raw_magic %s', threading.current_thread().name)
            try:
                raw_magic = rfile.read(4)  # raises if socket is disconnected or invalid
            except Exception as e:
                _L.debug('Fail to read raw_magic, %s', e)
                raw_magic = None
            _L.debug('read raw_magic %s done: %s', threading.current_thread().name, repr(raw_magic))
            if not raw_magic:  # nothing to read: remote end closed the connection
                return None
            magic = struct.unpack(fmt, raw_magic)[0]  # 'I' means unsigned int
            if magic != cls.magic:
                _L.error('Error: receive a malformat message, the message should start from a four bytes uint32 magic number')
                return None
            # The next read picks up the four-byte payload size.
            _L.debug('read payload')
            raw_payload_size = rfile.read(4)
            payload_size = struct.unpack('I', raw_payload_size)[0]
            _L.debug('Receive payload size %d', payload_size)
            # The payload may arrive in several chunks; keep reading until the
            # advertised number of bytes has been received.
            payload = b""
            remain_size = payload_size
            while remain_size > 0:
                data = rfile.read(remain_size)
                if not data:
                    return None
                payload += data
                bytes_read = len(data)  # length in bytes of this chunk
                assert(bytes_read <= remain_size)
                remain_size -= bytes_read
            return payload
        finally:
            # FIX: the original leaked rfile on every early-return/error path;
            # always close the file object (this does not close the socket).
            rfile.close()

    @classmethod
    def WrapAndSendPayload(cls, socket, payload):
        '''
        Send one framed message; return True on success, False on failure.
        '''
        wfile = None
        try:
            # From SocketServer.py: wbufsize = -1 means fully buffered;
            # flushed explicitly below.
            wbufsize = -1
            socket_message = SocketMessage(payload)
            wfile = socket.makefile('wb', wbufsize)
            # Write the header (packed magic + payload size), then the payload.
            wfile.write(struct.pack(fmt, socket_message.magic))
            wfile.write(struct.pack(fmt, socket_message.payload_size))
            wfile.write(payload)
            wfile.flush()
            return True
        except Exception as e:
            _L.error('Fail to send message %s', e)
            return False
        finally:
            # FIX: the original leaked wfile when an exception occurred after
            # makefile(); close the file object, not the socket.
            if wfile is not None:
                wfile.close()
class BaseClient(object):
    '''
    Message-framed TCP client with a background receiving thread.

    After calling the `send` function, only True or False is returned to
    indicate whether the write succeeded; incoming messages are delivered
    asynchronously to `raw_message_handler`. If you need request/response
    semantics, use `Client` instead. This class adds message framing on
    top of TCP.
    '''
    def __init__(self, endpoint, raw_message_handler):
        '''
        Parameters:
        endpoint: a tuple (ip, port)
        raw_message_handler: a function defined as `def handler(msg)` invoked
            for every incoming raw message (bytes)
        '''
        self.endpoint = endpoint
        self.raw_message_handler = raw_message_handler
        self.socket = None  # if socket == None, means client is not connected
        self.wait_connected = threading.Event()
        # Start a thread to get data from the socket.
        # FIX: daemon=True replaces the deprecated Thread.setDaemon() call.
        receiving_thread = threading.Thread(target=self.__receiving, daemon=True)
        receiving_thread.start()

    def connect(self, timeout = 1):
        '''
        Try to connect to server, return whether connection successful
        '''
        if self.isconnected():
            return True
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(self.endpoint)
            self.socket = s
            _L.debug('BaseClient: wait for connection confirm')
            self.wait_connected.clear()
            isset = self.wait_connected.wait(timeout)
            assert(isset != None)  # in python prior to 2.7 wait will return None
            if isset:
                return True
            else:
                self.socket = None
                _L.error('Socket is created, but can not get connection confirm from %s, timeout after %.2f seconds', self.endpoint, timeout)
                return False
            # only assign self.socket to connected socket
            # so it is safe to use self.socket != None to check connection status
            # This does not neccessarily mean connection successful, might be closed by server
            # Unless explicitly to tell the server to accept new socket
        except Exception as e:
            _L.error('Can not connect to %s', str(self.endpoint))
            _L.error("Error %s", e)
            self.socket = None
            return False

    def isconnected(self):
        # Connection status is tracked by whether self.socket is assigned.
        return self.socket is not None

    def disconnect(self):
        if self.isconnected():
            _L.debug("BaseClient, request disconnect from server in %s", threading.current_thread().name)
            # Because the socket is blocked on read in the __receiving thread,
            # shutdown is needed to force it to return.
            self.socket.shutdown(socket.SHUT_RD)
            if self.socket:  # This may also be set to None in the __receiving thread
                self.socket.close()
                self.socket = None
            time.sleep(0.1)  # TODO, this is tricky

    def __receiving(self):
        '''
        Receive packages and extract messages from them, forever.
        Calls self.raw_message_handler for each message and also tracks
        whether the client is still connected.
        '''
        _L.debug('BaseClient start receiving in %s', threading.current_thread().name)
        while True:
            if self.isconnected():
                # Only this thread is allowed to read from the socket,
                # otherwise a lock would be needed to avoid competing reads.
                message = SocketMessage.ReceivePayload(self.socket)
                _L.debug('Got server raw message %s', message)
                if not message:
                    _L.debug('BaseClient: remote disconnected, no more message')
                    self.socket = None
                    continue
                if message.startswith(b'connected'):
                    _L.info('Got connection confirm: %s', repr(message))
                    self.wait_connected.set()
                    continue
                if self.raw_message_handler:
                    self.raw_message_handler(message)  # will block this thread
                else:
                    _L.error('No message handler for raw message %s', message)
            else:
                # FIX: the original spun in a tight loop while disconnected,
                # burning a full CPU core; sleep briefly between checks.
                time.sleep(0.05)

    def send(self, message):
        '''
        Send message out, return whether the message was successfully sent
        '''
        if self.isconnected():
            _L.debug('BaseClient: Send message %s', self.socket)
            SocketMessage.WrapAndSendPayload(self.socket, message)
            return True
        else:
            _L.error('Fail to send message, client is not connected')
            return False
class Client(object):
    '''
    Request/response client for UnrealCV.

    Wraps `BaseClient` and matches each response to the request that produced
    it via a monotonically increasing message id. Currently only one client
    is allowed at a time; more clients will be rejected.
    '''
    def __raw_message_handler(self, raw_message):
        match = self.raw_message_regexp.match(raw_message)
        if match:
            [message_id, message_body] = (int(match.group(1)), match.group(2))  # TODO: handle multiline response
            # Re-slice from the raw message: the regexp's (.*) stops at the
            # first newline, so group(2) would truncate multiline payloads.
            message_body = raw_message[len(match.group(1))+1:]
            # Convert to utf-8 if it's not a byte array (as is the case for images)
            try:
                message_body = message_body.decode('utf-8')
            except UnicodeDecodeError:
                pass
            if message_id == self.message_id:
                self.response = message_body
                self.wait_response.set()
            else:
                # A response id that does not match the outstanding request
                # indicates a protocol violation.
                assert(False)
        else:
            # Not a numbered response: treat it as an asynchronous message
            # and dispatch it on the worker thread.
            if self.message_handler:
                def do_callback():
                    self.message_handler(raw_message)
                self.queue.put(do_callback)
            else:
                # Instead of just dropping this message, give a verbose notice
                _L.error('No message handler to handle message %s', raw_message)

    def __init__(self, endpoint, message_handler=None):
        # FIX: raw bytes literal; b'(\d...' relied on the invalid escape
        # sequence \d, which raises a SyntaxWarning on modern Python and is
        # slated to become a SyntaxError.
        self.raw_message_regexp = re.compile(rb'(\d{1,8}):(.*)')
        self.message_client = BaseClient(endpoint, self.__raw_message_handler)
        self.message_handler = message_handler
        self.message_id = 0
        self.wait_response = threading.Event()
        self.response = ''
        # Delegate connection management straight to the underlying BaseClient.
        self.isconnected = self.message_client.isconnected
        self.connect = self.message_client.connect
        self.disconnect = self.message_client.disconnect
        self.queue = Queue()
        # FIX: daemon=True replaces the deprecated Thread.setDaemon() call.
        self.main_thread = threading.Thread(target=self.worker, daemon=True)
        self.main_thread.start()

    def worker(self):
        '''Run queued callbacks (async messages, deferred requests) forever.'''
        while True:
            task = self.queue.get()
            task()
            self.queue.task_done()

    def request(self, message, timeout=5):
        # docstring in numpy style
        """
        Send a request to server and wait util get a response from server or timeout.
        Parameters
        ----------
        cmd : str
            command to control the game. More info can be seen from http://docs.unrealcv.org/en/master/reference/commands.html
        Returns
        -------
        str
            plain text message from server
        Examples
        --------
        >>> client = Client('localhost', 9000)
        >>> client.connect()
        >>> response = client.request('vget /camera/0/view')
        """
        if sys.version_info[0] == 3:
            if not isinstance(message, bytes):
                message = message.encode("utf-8")

        def do_request():
            raw_message = b'%d:%s' % (self.message_id, message)
            _L.debug('Request: %s', raw_message.decode("utf-8"))
            if not self.message_client.send(raw_message):
                return None

        # request can only be sent in the main thread, do not support
        # multi-thread submitting request together
        if threading.current_thread().name == self.main_thread.name:
            do_request()
        else:
            self.queue.put(do_request)
        # Timeout is required
        # see: https://bugs.python.org/issue8844
        self.wait_response.clear()  # This is important
        isset = self.wait_response.wait(timeout)
        self.message_id += 1  # Increment it only after the request/response cycle finished
        assert(isset != None)  # only python prior to 2.7 will return None
        if isset:
            return self.response
        else:
            _L.error('Can not receive a response from server, timeout after %.2f seconds', timeout)
            return None
# Module-level convenience client, created as a side effect of importing this
# module. Constructing Client here spawns its worker thread and BaseClient's
# receiving thread immediately, but no TCP connection is opened until
# client.connect() is called.
(HOST, PORT) = ('localhost', 9000)
client = Client((HOST, PORT), None)
| client/python/unrealcv/__init__.py | 13,121 | BaseClient send message out and receiving message in a seperate thread.
After calling the `send` function, only True or False will be returned
to indicate whether the operation was successful.
If you are trying to send a request and get a response, consider using `Client` instead.
This class adds message framing on top of TCP
Client can be used to send request to a game and get response
Currently only one client is allowed at a time
More clients will be rejected
Define the format of a message. This class is defined similar to the class FNFSMessageHeader in UnrealEngine4, but without CRC check.
The magic number is from Unreal implementation
See https://github.com/EpicGames/UnrealEngine/blob/dff3c48be101bb9f84633a733ef79c91c38d9542/Engine/Source/Runtime/Sockets/Public/NetworkMessage.h
Return only payload, not the raw message, None if failed.
socket: a blocking socket for read data.
Send payload, true if success, false if failed
Parameters:
endpoint: a tuple (ip, port)
message_handler: a function defined as `def message_handler(msg)` to handle incoming message, msg is a string
Receive packages, Extract message from packages
Call self.message_handler if got a message
Also check whether client is still connected
Try to connect to server, return whether connection successful
Send a request to server and wait util get a response from server or timeout.
Parameters
----------
cmd : str
command to control the game. More info can be seen from http://docs.unrealcv.org/en/master/reference/commands.html
Returns
-------
str
plain text message from server
Examples
--------
>>> client = Client('localhost', 9000)
>>> client.connect()
>>> response = client.request('vget /camera/0/view')
Send message out, return whether the message was successfully sent
UnrealCV
========
Provides functions to interact with games built using Unreal Engine.
>>> import unrealcv
>>> (HOST, PORT) = ('localhost', 9000)
>>> client = unrealcv.Client((HOST, PORT))
for Python 3 _L.addHandler(logging.NullHandler()) Let client to decide how to do logging rbufsize = -1 From SocketServer.py socket is disconnected or invalid nothing to read _L.debug('socket disconnect') print 'Receive raw magic: %d, %s' % (len(raw_magic), raw_magic) 'I' means unsigned int print 'Receive magic:', magic The next time it will read four bytes again print 'Receive raw payload size: %d, %s' % (len(raw_payload_size), raw_payload_size) if the message is incomplete, should wait until all the data received len(data) is its string length, but we want length of bytes print 'bytes_read %d, remain_size %d, read_str %s' % (bytes_read, remain_size, data) From SocketServer.py wbufsize = 0, flush immediately Convert Write the message Need to send the packed version print 'Sent ', socket_message.magic print 'Sent ', socket_message.payload_size print 'Sent ', payload Close file object, not close the socket if socket == None, means client is not connected Start a thread to get data from the socket in python prior to 2.7 wait will return None only assign self.socket to connected socket so it is safe to use self.socket != None to check connection status This does not neccessarily mean connection successful, might be closed by server Unless explicitly to tell the server to accept new socket Because socket is on read in __receiving thread, need to call shutdown to force it to close This may also be set to None in the __receiving thread TODO, this is tricky Only this thread is allowed to read from socket, otherwise need lock to avoid competing self.wait_connected.clear() will block this thread print 'Waiting for message id %d' % self.message_id TODO: handle multiline response Convert to utf-8 if it's not a byte array (as is the case for images) print 'Received message id %s' % message_id Instead of just dropping this message, give a verbose notice docstring in numpy style request can only be sent in the main thread, do not support multi-thread submitting request together 
Timeout is required see: https://bugs.python.org/issue8844 This is important Increment it only after the request/response cycle finished only python prior to 2.7 will return None | 4,145 | en | 0.799368 |
from setuptools import setup, find_packages
from codecs import open  # To use a consistent encoding
from os import path
import templar

# Single source of truth for the version: the package itself.
version = templar.__version__

# Runtime requirements.
dependencies = [
    'jinja2==2.8',
]

# All setup() keyword arguments gathered in one mapping for readability.
setup_args = dict(
    name='templar',
    version=version,
    description='A static templating engine written in Python',
    url='https://github.com/albert12132/templar',
    author='Albert Wu',
    author_email='albert12132@gmail.com',
    license='MIT',
    keywords=['templating', 'static template', 'markdown'],
    packages=find_packages(exclude=['tests*']),
    install_requires=dependencies,
    entry_points={
        'console_scripts': [
            'templar=templar.cli.templar:main',
            'markdown=templar.markdown:main',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Text Processing :: Markup :: HTML',
    ],
)

setup(**setup_args)
| setup.py | 1,131 | To use a consistent encoding | 28 | en | 0.663774 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs

# Public API of this module.
__all__ = [
    'GetListenerResult',
    'AwaitableGetListenerResult',
    'get_listener',
    'get_listener_output',
]

# Emitted once at import time: this module is a deprecated alias for aws.lb.getListener.
warnings.warn("""aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""", DeprecationWarning)
@pulumi.output_type
class GetListenerResult:
    """
    A collection of values returned by getListener.
    """
    def __init__(__self__, alpn_policy=None, arn=None, certificate_arn=None, default_actions=None, id=None, load_balancer_arn=None, port=None, protocol=None, ssl_policy=None, tags=None):
        # Each truthy argument is validated for its expected runtime type and
        # then stored through pulumi.set so the @pulumi.output_type machinery
        # can resolve it via the property getters below.
        if alpn_policy and not isinstance(alpn_policy, str):
            raise TypeError("Expected argument 'alpn_policy' to be a str")
        pulumi.set(__self__, "alpn_policy", alpn_policy)
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if certificate_arn and not isinstance(certificate_arn, str):
            raise TypeError("Expected argument 'certificate_arn' to be a str")
        pulumi.set(__self__, "certificate_arn", certificate_arn)
        if default_actions and not isinstance(default_actions, list):
            raise TypeError("Expected argument 'default_actions' to be a list")
        pulumi.set(__self__, "default_actions", default_actions)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if load_balancer_arn and not isinstance(load_balancer_arn, str):
            raise TypeError("Expected argument 'load_balancer_arn' to be a str")
        pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
        if port and not isinstance(port, int):
            raise TypeError("Expected argument 'port' to be a int")
        pulumi.set(__self__, "port", port)
        if protocol and not isinstance(protocol, str):
            raise TypeError("Expected argument 'protocol' to be a str")
        pulumi.set(__self__, "protocol", protocol)
        if ssl_policy and not isinstance(ssl_policy, str):
            raise TypeError("Expected argument 'ssl_policy' to be a str")
        pulumi.set(__self__, "ssl_policy", ssl_policy)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)

    # Read-only accessors; values are looked up through pulumi.get. The
    # @pulumi.getter name= argument maps the Python attribute to its
    # camelCase wire name.
    @property
    @pulumi.getter(name="alpnPolicy")
    def alpn_policy(self) -> str:
        return pulumi.get(self, "alpn_policy")

    @property
    @pulumi.getter
    def arn(self) -> str:
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="certificateArn")
    def certificate_arn(self) -> str:
        return pulumi.get(self, "certificate_arn")

    @property
    @pulumi.getter(name="defaultActions")
    def default_actions(self) -> Sequence['outputs.GetListenerDefaultActionResult']:
        return pulumi.get(self, "default_actions")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="loadBalancerArn")
    def load_balancer_arn(self) -> str:
        return pulumi.get(self, "load_balancer_arn")

    @property
    @pulumi.getter
    def port(self) -> int:
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def protocol(self) -> str:
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> str:
        return pulumi.get(self, "ssl_policy")

    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        return pulumi.get(self, "tags")
class AwaitableGetListenerResult(GetListenerResult):
    """Result wrapper that can be used both synchronously and with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns __await__ into a generator function,
        # which is what makes this object awaitable; awaiting it immediately
        # produces a plain GetListenerResult built from the resolved values.
        if False:
            yield self
        return GetListenerResult(
            alpn_policy=self.alpn_policy,
            arn=self.arn,
            certificate_arn=self.certificate_arn,
            default_actions=self.default_actions,
            id=self.id,
            load_balancer_arn=self.load_balancer_arn,
            port=self.port,
            protocol=self.protocol,
            ssl_policy=self.ssl_policy,
            tags=self.tags)
def get_listener(arn: Optional[str] = None,
                 load_balancer_arn: Optional[str] = None,
                 port: Optional[int] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetListenerResult:
    """
    > **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
    Provides information about a Load Balancer Listener.
    This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    config = pulumi.Config()
    listener_arn = config.require("listenerArn")
    listener = aws.lb.get_listener(arn=listener_arn)
    selected = aws.lb.get_load_balancer(name="default-public")
    selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
        port=443)
    ```
    :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
    :param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
    :param int port: Port of the listener. Required if `arn` is not set.
    """
    # Runtime deprecation notice, emitted on every call.
    pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
    # Marshal the arguments under their camelCase wire names.
    __args__ = dict()
    __args__['arn'] = arn
    __args__['loadBalancerArn'] = load_balancer_arn
    __args__['port'] = port
    __args__['tags'] = tags
    # Fill in default invoke options and the plugin version when not supplied.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function, then wrap the resolved values so the
    # result can also be awaited.
    __ret__ = pulumi.runtime.invoke('aws:elasticloadbalancingv2/getListener:getListener', __args__, opts=opts, typ=GetListenerResult).value
    return AwaitableGetListenerResult(
        alpn_policy=__ret__.alpn_policy,
        arn=__ret__.arn,
        certificate_arn=__ret__.certificate_arn,
        default_actions=__ret__.default_actions,
        id=__ret__.id,
        load_balancer_arn=__ret__.load_balancer_arn,
        port=__ret__.port,
        protocol=__ret__.protocol,
        ssl_policy=__ret__.ssl_policy,
        tags=__ret__.tags)
@_utilities.lift_output_func(get_listener)
def get_listener_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
                        load_balancer_arn: Optional[pulumi.Input[Optional[str]]] = None,
                        port: Optional[pulumi.Input[Optional[int]]] = None,
                        tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetListenerResult]:
    """
    > **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
    Provides information about a Load Balancer Listener.
    This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    config = pulumi.Config()
    listener_arn = config.require("listenerArn")
    listener = aws.lb.get_listener(arn=listener_arn)
    selected = aws.lb.get_load_balancer(name="default-public")
    selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
        port=443)
    ```
    :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
    :param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
    :param int port: Port of the listener. Required if `arn` is not set.
    """
    pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
    # Body intentionally empty: the lift_output_func decorator supplies the
    # implementation by delegating to get_listener.
    ...
| sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py | 8,599 | A collection of values returned by getListener.
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
:param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
:param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
The provider-assigned unique ID for this managed resource.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 2,154 | en | 0.664143 |
# modmerger framework
# by sphere

# Version of the modmerger framework itself.
modmerger_version = 201

# Note: the following is from Warband 1.127 module system.
# NOTE(review): module_sys_info (used in the version check below) is
# presumably supplied by this star import - verify it is defined in
# modmerger_options.
from modmerger_options import *

# list of current module components
# not in use atm
mod_components = [
    "animations",
    "constants",
    "dialogs",
    "factions",
    "game_menus",
    "info",
    "info_pages",
    "items",
    "map_icons",
    "meshes",
    "mission_templates",
    "music",
    "particle_systems",
    "parties",
    "party_templates",
    "postfx",
    "presentations",
    "quests",
    "scenes",
    "scene_props",
    "scripts",
    "simple_triggers",
    "skills",
    "skins",
    "sounds",
    "strings",
    "tableau_materials",
    "triggers",
    "troops",
    "variables",
]

# these are components that do not need to be branded
mod_components0 = [
    "info",
]

# These are the components requiring full import of symbols. Currently only "constants"
mod_components1 = [
    "constants",
]

# these are components which passes in variable with same name as the component name itself
mod_components2 = [
    "animations",
    "dialogs",
    "game_menus",
    "info_pages",
    "items",
    "map_icons",
    "meshes",
    "particle_systems",
    "parties",
    "party_templates",
    "presentations",
    "quests",
    "scenes",
    "scene_props",
    "scripts",
    "simple_triggers",
    "skills",
    "skins",
    "sounds",
    "strings",
    "triggers",
    "troops",
]

# This is a list of components with a list of the important global variables defined in it)
mod_components3 = {
    #"info": ["export_dir"], # export_dir
    "variables" : ["reserved_variables"] , # reserved_variables
    "music": ["tracks"], # tracks
    "tableau_materials" : ["tableaus"] , # tableaus
    "postfx" : ["postfx_params"], # postfx_params
    "factions" :["factions","default_kingdom_relations"],
    "mission_templates": [
        "mission_templates",
        "multiplayer_server_check_belfry_movement",
        "multiplayer_server_spawn_bots",
        "multiplayer_server_manage_bots",
        "multiplayer_server_check_polls",
        "multiplayer_server_check_end_map",
        "multiplayer_once_at_the_first_frame",
        "multiplayer_battle_window_opened",
        "common_battle_mission_start",
        "common_battle_tab_press",
        "common_battle_init_banner",
        "common_arena_fight_tab_press",
        "common_custom_battle_tab_press",
        "custom_battle_check_victory_condition",
        "custom_battle_check_defeat_condition",
        "common_battle_victory_display",
        "common_siege_question_answered",
        "common_custom_battle_question_answered",
        "common_custom_siege_init",
        "common_siege_init",
        "common_music_situation_update",
        "common_siege_ai_trigger_init",
        "common_siege_ai_trigger_init_2",
        "common_siege_ai_trigger_init_after_2_secs",
        "common_siege_defender_reinforcement_check",
        "common_siege_defender_reinforcement_archer_reposition",
        "common_siege_attacker_reinforcement_check",
        "common_siege_attacker_do_not_stall",
        "common_battle_check_friendly_kills",
        "common_battle_check_victory_condition",
        "common_battle_victory_display",
        "common_siege_refill_ammo",
        "common_siege_check_defeat_condition",
        "common_battle_order_panel",
        "common_battle_order_panel_tick",
        "common_battle_inventory",
        "common_inventory_not_available",
        "common_siege_init_ai_and_belfry",
        "common_siege_move_belfry",
        "common_siege_rotate_belfry",
        "common_siege_assign_men_to_belfry",
        "tournament_triggers",
    ],
}

# fix for mb vanilla: versions at or below 1011 lack info_pages/postfx and
# use a smaller mission_templates trigger list.
if module_sys_info["version"] <= 1011:
    mod_components.remove("info_pages")
    mod_components.remove("postfx")
    mod_components3["mission_templates"] = [ #1011 version
        "mission_templates",
        "common_battle_mission_start",
        "common_battle_tab_press",
        "common_arena_fight_tab_press",
        "common_custom_battle_tab_press",
        "common_battle_victory_display",
        "common_siege_question_answered",
        "common_custom_battle_question_answered",
        "common_custom_siege_init",
        "common_siege_init",
        "common_music_situation_update",
        "common_siege_ai_trigger_init",
        "common_siege_ai_trigger_init_2",
        "common_siege_ai_trigger_init_after_2_secs",
        "common_siege_defender_reinforcement_check",
        "common_siege_defender_reinforcement_archer_reposition",
        "common_siege_attacker_reinforcement_check",
        "common_siege_attacker_do_not_stall",
        "common_battle_check_friendly_kills",
        "common_battle_check_victory_condition",
        "common_battle_victory_display",
        "common_siege_refill_ammo",
        "common_siege_check_defeat_condition",
        "common_battle_order_panel",
        "common_battle_order_panel_tick",
        "common_battle_inventory",
        "common_inventory_not_available",
        "common_siege_init_ai_and_belfry",
        "common_siege_move_belfry",
        "common_siege_rotate_belfry",
        "common_siege_assign_men_to_belfry",
    ]
# gets the type of component on whether it is found in mod_components1 or mod_components2. Those not found in either are returned as 0
def get_component_type(component_name, components1=None, components2=None, components3=None):
    """Return a bitmask describing how a module component is handled.

    Bit 1 is set when the component requires a full symbol import
    (mod_components1), bit 2 when it passes a variable named after the
    component itself (mod_components2), and bit 4 when it exposes a list of
    important globals (mod_components3). A component found in none of the
    collections yields 0.

    The collections default to the module-level tables above but can be
    overridden (e.g. for testing or custom merge configurations), which is
    backward compatible with the original one-argument call.
    """
    if components1 is None:
        components1 = mod_components1
    if components2 is None:
        components2 = mod_components2
    if components3 is None:
        components3 = mod_components3
    comp_type = 0
    # Plain membership tests replace the original try/except around
    # list.index() and a bare dict lookup - same result, clearer intent.
    if component_name in components1:
        comp_type |= 1
    if component_name in components2:
        comp_type |= 2
    if component_name in components3:
        comp_type |= 4
    return comp_type
#!/usr/bin/env python3
import json
import argparse
import datetime
class TicketManager:
    """JSON-file-backed ticket store.

    Tickets are kept in memory as a dict keyed by title (``self.tickets``)
    and persisted to a single JSON file after every mutation.
    """

    # Class-level fallback path, kept for backward compatibility.
    ticketfile = '/Users/ben/ticketing/tickets.json'

    def __init__(self, ticketfile: str = '/Users/ben/Google Drive/code/ticketing/tickets.json') -> None:
        """Bind the store to *ticketfile* and load its tickets.

        FIX: the original assigned ``self.ticketfille`` (typo), so the
        argument was silently ignored and the class-level default path was
        always used instead.
        """
        self.ticketfile = ticketfile
        self.read_tickets()

    def read_tickets(self) -> None:
        """(Re)load ``self.tickets`` from disk."""
        # FIX: context manager; the original leaked the open file handle.
        with open(self.ticketfile) as f:
            self.tickets = json.load(f)

    def write_tickets(self) -> None:
        """Persist ``self.tickets`` to disk, pretty-printed."""
        # FIX: context manager; the original leaked the open file handle.
        with open(self.ticketfile, "w") as f:
            json.dump(self.tickets, f, indent=4)

    def create_ticket(self, title="", desc="", dest="", due="", pri=0, completed=False):
        """Create (or overwrite) the ticket named *title* and persist it.

        ``time_in`` is stamped with the current local time; *due*, *dest*
        and *pri* are stored verbatim.
        """
        ticket = {"title": title,
                  "desc": desc,
                  "for": dest,
                  "time_in": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                  "time_out": due,
                  "nice": pri,
                  "completed": completed
                  }
        self.tickets[title] = ticket
        self.write_tickets()
        self.read_tickets()

    def update_ticket(self, title, new_completed):
        """Set the completion flag of ticket *title* and persist it."""
        self.tickets[title]["completed"] = new_completed
        self.write_tickets()
        self.read_tickets()

    def show_all_tickets(self):
        """Print every ticket, finished or not."""
        for ticket in self.tickets.values():
            print("""TICKET NAME: {}
            \tTICKET DESCRIPTION: {}
            \tTICKET CREATED: {}
            \tTICKET DUE: {}
            \tTICKET FOR: {}
            \tTICKET DONE: {}
            \tTICKET PRIORITY: {}
            """.format(ticket['title'], ticket['desc'], ticket['time_in'], ticket['time_out'],
                       ticket['for'], ticket['completed'], ticket['nice']))

    def show_unifnished(self):
        """Print every unfinished ticket (misspelled name kept: callers use it)."""
        flag = False
        for ticket in self.tickets.values():
            if not ticket['completed']:
                flag = True
                print("""TICKET NAME: {}
            \tTICKET DESCRIPTION: {}
            \tTICKET CREATED: {}
            \tTICKET DUE: {}
            \tTICKET FOR: {}
            \tTICKET PRIORITY: {}
            """.format(ticket['title'], ticket['desc'], ticket['time_in'], ticket['time_out'],
                       ticket['for'], ticket['nice']))
        if not flag:
            print("No Unfinished Tasks!")

    # Correctly spelled alias for show_unifnished, added for new callers.
    show_unfinished = show_unifnished
if __name__ == "__main__":
    # Command-line front end: --mode selects the action, remaining flags
    # feed ticket fields.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument("--mode", action="store", dest="mode", default='ls')
    parser.add_argument("--title", action="store", dest="title")
    parser.add_argument("--desc", action="store", dest="desc")
    parser.add_argument("--for", action="store", dest="dest")
    parser.add_argument("--due", action="store", dest="time_out")
    parser.add_argument("--pri", action="store", dest="nice")
    parser.add_argument("--done", action="store_true",
                        dest="completed", default=False)
    args = parser.parse_args()

    tm = TicketManager("tickets.json")
    mode = args.mode
    if mode == "ls":
        # Default: only open tickets.
        tm.show_unifnished()
    elif mode == "ls2":
        tm.show_all_tickets()
    elif mode in ("new", "add"):
        tm.create_ticket(title=args.title, desc=args.desc, dest=args.dest,
                         due=args.time_out, pri=args.nice,
                         completed=args.completed)
        print("New Task '{}' Added".format(args.title))
    elif mode == "up":
        tm.update_ticket(args.title, args.completed)
| worktickets.py | 3,242 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class TripleSoftmaxLoss(nn.Module):
    """Cross-entropy loss over concatenated sentence-pair features, plus a
    document-similarity reward term.

    Two sentences are embedded with ``model``; the classifier input is the
    concatenation of [rep_a, rep_b, |rep_a - rep_b|, |rep_a - document_rep|]
    (subject to the ``concatenation_*`` flags).  When labels are given the
    loss is::

        (1 - document_coef) * CE(classifier(features), labels)
        - document_coef * sum(cos_sim(document_rep, rep_b))

    :param model: SentenceTransformer producing 'sentence_embedding' vectors.
    :param sentence_embedding_dimension: size of one sentence embedding.
    :param num_labels: number of classification labels.
    :param vocab: accepted for caller compatibility; not used here.
    :param document_coef: weight of the document-similarity term.
    :param concatenation_sent_rep: include rep_a and rep_b in the features.
    :param concatenation_sent_difference: include both absolute differences.
    :param concatenation_sent_multiplication: accepted but currently unused
        in ``forward``.
    """
    def __init__(self,
                 model: SentenceTransformer,
                 sentence_embedding_dimension: int,
                 num_labels: int,
                 vocab,
                 document_coef: float = 0.4,
                 concatenation_sent_rep: bool = True,
                 concatenation_sent_difference: bool = True,
                 concatenation_sent_multiplication: bool = False):
        super(TripleSoftmaxLoss, self).__init__()
        self.model = model
        self.num_labels = num_labels
        self.hidden = 1000
        self.concatenation_sent_rep = concatenation_sent_rep
        self.concatenation_sent_difference = concatenation_sent_difference
        self.concatenation_sent_multiplication = concatenation_sent_multiplication
        self.document_coef = document_coef
        num_vectors_concatenated = 0
        if concatenation_sent_rep:
            num_vectors_concatenated += 2
        if concatenation_sent_difference:
            num_vectors_concatenated += 2
        logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
        self.relu = nn.ReLU()
        # Project the raw document vector down to the sentence-embedding
        # space: 291868 -> hidden -> 768.
        # NOTE(review): 291868 is presumably the document-vector/vocab size
        # of the data pipeline -- confirm before reuse.
        self.document2hidden = nn.Linear(291868, self.hidden)
        self.hidden2output = nn.Linear(self.hidden, 768)
        self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)

    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, document_rep: Tensor):
        """Return the combined loss, or ``(reps, logits)`` when labels is None."""
        reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
        rep_a, rep_b = reps
        # Two-layer ReLU projection of the document vector to 768 dims.
        document_rep = self.relu(self.hidden2output(self.relu(self.document2hidden(document_rep.float()))))
        vectors_concat = []
        if self.concatenation_sent_rep:
            vectors_concat.append(rep_a)
            vectors_concat.append(rep_b)
        if self.concatenation_sent_difference:
            vectors_concat.append(torch.abs(rep_a - rep_b))
            vectors_concat.append(torch.abs(rep_a - document_rep))
        features = torch.cat(vectors_concat, 1)
        output = self.classifier(features)
        loss_fct = nn.CrossEntropyLoss()
        if labels is not None:
            loss = (1.0 - self.document_coef) * loss_fct(output, labels.view(-1))
            # Reward similarity between the document and the second sentence.
            # (translated from a Korean TODO: "it would be nice to add MMI here")
            loss -= self.document_coef * torch.sum(torch.cosine_similarity(document_rep, rep_b))
            return loss
        else:
            # FIX: this line previously had dataset-metadata text fused onto
            # it, which made the module unparseable.
            return reps, output
import socket
import random
import os
import requests
import re
import github
import minecraft
import string
import sys
# IRC connection settings.
HOST = "irc.libera.chat"  # libera.chat network
PORT = 6667  # plain-text IRC port (no TLS)
NICK = "DoveBot"
# NickServ auth is currently disabled; re-enable by exporting PASSWORD.
#PASSWORD = os.getenv("PASSWORD")
CHANNEL = "#dovegaming"
SERVER = ""  # filled in from the first server response line (used for PONG)
readbuffer = ""  # accumulates partial lines between recv() calls
def send(message):
    """Write *message* (a bytes object) to the IRC socket, then echo it to stdout."""
    s.send(message)
    print(message)
# Open the TCP connection and register with the IRC server.
s = socket.socket()
s.connect((HOST, PORT))
send(bytes("NICK %s\r\n" % NICK, "UTF-8"))
send(bytes("USER %s %s %s :%s\r\n" % (NICK, NICK, NICK, NICK), "UTF-8"))
# NickServ authentication (disabled while PASSWORD is not configured).
#s.send(bytes("PRIVMSG NickServ regain {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ identify {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
send(bytes("JOIN {}\r\n".format(CHANNEL), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ :identify {}\r\n".format(PASSWORD), "UTF-8"))
# Read the server greeting; the first word of each line (minus the leading
# ':') is taken as the server name, which PONG replies need later.
readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()  # keep the trailing partial line for the next recv
for line in temp:
    SERVER = str.rstrip(line)[1:].split()[0]
    print(str.rstrip(line))
while 1:
    # Append new socket data and split into complete IRC lines; the trailing
    # (possibly partial) fragment stays in readbuffer for the next recv().
    readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
    temp = str.split(readbuffer, "\n")
    readbuffer = temp.pop()
    for line in temp:
        print(str.rstrip(line))
        # Everything after "PRIVMSG <channel> :" is the chat payload.
        message = str.rstrip(line).split(" PRIVMSG {} :".format(CHANNEL))
        if "PING" in line: send("PONG :{}\r\n".format(SERVER).encode("utf-8"))
        msg = message[-1]
        tokens = msg.split()
        if msg == "$hello": send("PRIVMSG {} :Hello!\r\n".format(CHANNEL).encode("utf-8"))
        if msg == "$ping": send("PRIVMSG {} :Pong!\r\n".format(CHANNEL).encode("utf-8"))
        if msg == "$random": send("PRIVMSG {} :{}\r\n".format(CHANNEL, random.randint(0, 100)).encode("utf-8"))
        if msg.startswith("$youtube "):
            # Scrape video ids from the results page and link the first hit.
            html = requests.get("https://www.youtube.com/results?search_query=" + " ".join(msg.split()[1:])).content
            video_ids = re.findall(r"watch\?v=(\S{11})", html.decode())
            send("PRIVMSG {} :https://www.youtube.com/watch?v={}\r\n".format(CHANNEL, video_ids[0]).encode("utf-8"))
        #if msg.startswith("$google "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, googlesearch.search(" ".join(msg.split()[1:]))[0]).encode("utf-8"))
        #if msg.startswith("$wolfram "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, wolfram.get(" ".join(msg.split()[1:]))).encode("utf-8"))
        if msg.startswith("$github "):
            if tokens[1] == "url": send("PRIVMSG {} :https://github.com/{}/{}\r\n".format(CHANNEL, tokens[2], tokens[3]).encode("utf-8"))
            if tokens[1] == "issues": send("PRIVMSG {} :#{}: {}\r\n".format(CHANNEL, tokens[4], github.get_issue_title(tokens[2], tokens[3], tokens[4])).encode("utf-8"))
        if msg == "$server": send("PRIVMSG {} :{}\r\n".format(CHANNEL, minecraft.get()).encode("utf-8"))
        # FIX: "Avalible" -> "Available" in the user-visible help text.
        if msg == "$help": send("PRIVMSG {} :Available commands: $hello, $ping, $youtube, $google, $github, $wolfram.\r\n".format(CHANNEL).encode("utf-8"))
        if msg.startswith("$help "):
            if tokens[1] == "hello": send("PRIVMSG {} :Syntax: $hello Action: Says \"Hello!\".\r\n".format(CHANNEL).encode("utf-8"))
            # FIX: $ping actually answers "Pong!", not "Ping!".
            if tokens[1] == "ping":send("PRIVMSG {} :Syntax: $ping Action: Says \"Pong!\".\r\n".format(CHANNEL).encode("utf-8"))
            if tokens[1] == "youtube": send("PRIVMSG {} :Syntax: $youtube <keyword> Action: Sends the URL of a YouTube video matching the keyword given.\r\n".format(CHANNEL).encode("utf-8"))
            #if tokens[1] == "google": send("PRIVMSG {} :Syntax: $google <keyword> Action: Sends the URL of a google search with the keyword given\r\n".format(CHANNEL).encode("utf-8"))
            if tokens[1] == "github": send("PRIVMSG {} :Syntax: $github <topic> <user> <repo> <number> Action: Returns data about a github repo.\r\n".format(CHANNEL).encode("utf-8"))
            #if tokens[1] == "wolfram": send("PRIVMSG {} :Syntax: $wolfram <query> Action: Asks Wolfram|Alpha the query given.\r\n".format(CHANNEL).encode("utf-8"))
| runtime/bots/irc/main.py | 3,940 | PASSWORD = os.getenv("PASSWORD")s.send(bytes("PRIVMSG NickServ regain {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))s.send(bytes("PRIVMSG NickServ identify {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))s.send(bytes("PRIVMSG NickServ :identify {}\r\n".format(PASSWORD), "UTF-8"))if msg.startswith("$google "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, googlesearch.search(" ".join(msg.split()[1:]))[0]).encode("utf-8"))if msg.startswith("$wolfram "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, wolfram.get(" ".join(msg.split()[1:]))).encode("utf-8"))if tokens[1] == "google": send("PRIVMSG {} :Syntax: $google <keyword> Action: Sends the URL of a google search with the keyword given\r\n".format(CHANNEL).encode("utf-8"))if tokens[1] == "wolfram": send("PRIVMSG {} :Syntax: $wolfram <query> Action: Asks Wolfram|Alpha the query given.\r\n".format(CHANNEL).encode("utf-8")) | 866 | en | 0.325257 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
from . import outputs
from ._inputs import *
# Names re-exported by this generated module.
__all__ = [
    'GetAmiIdsResult',
    'AwaitableGetAmiIdsResult',
    'get_ami_ids',
]

# Emitted once at import time: this top-level alias is deprecated in favor
# of the aws.ec2 namespace.
warnings.warn("""aws.getAmiIds has been deprecated in favor of aws.ec2.getAmiIds""", DeprecationWarning)
@pulumi.output_type
class GetAmiIdsResult:
    """
    A collection of values returned by getAmiIds.
    """
    # Generated result type (tfgen): __init__ type-checks each field and
    # stores it via pulumi.set; values are read back through the read-only
    # @property accessors below.
    def __init__(__self__, executable_users=None, filters=None, id=None, ids=None, name_regex=None, owners=None, sort_ascending=None):
        if executable_users and not isinstance(executable_users, list):
            raise TypeError("Expected argument 'executable_users' to be a list")
        pulumi.set(__self__, "executable_users", executable_users)
        if filters and not isinstance(filters, list):
            raise TypeError("Expected argument 'filters' to be a list")
        pulumi.set(__self__, "filters", filters)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ids and not isinstance(ids, list):
            raise TypeError("Expected argument 'ids' to be a list")
        pulumi.set(__self__, "ids", ids)
        if name_regex and not isinstance(name_regex, str):
            raise TypeError("Expected argument 'name_regex' to be a str")
        pulumi.set(__self__, "name_regex", name_regex)
        if owners and not isinstance(owners, list):
            raise TypeError("Expected argument 'owners' to be a list")
        pulumi.set(__self__, "owners", owners)
        if sort_ascending and not isinstance(sort_ascending, bool):
            raise TypeError("Expected argument 'sort_ascending' to be a bool")
        pulumi.set(__self__, "sort_ascending", sort_ascending)

    @property
    @pulumi.getter(name="executableUsers")
    def executable_users(self) -> Optional[Sequence[str]]:
        """Users with explicit launch permission the search was limited to, if any."""
        return pulumi.get(self, "executable_users")

    @property
    @pulumi.getter
    def filters(self) -> Optional[Sequence['outputs.GetAmiIdsFilterResult']]:
        """Name/value filter pairs that were applied to the AMI search, if any."""
        return pulumi.get(self, "filters")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def ids(self) -> Sequence[str]:
        """IDs of the AMIs that matched the search."""
        return pulumi.get(self, "ids")

    @property
    @pulumi.getter(name="nameRegex")
    def name_regex(self) -> Optional[str]:
        """Regex that was applied client-side to the returned AMI list, if any."""
        return pulumi.get(self, "name_regex")

    @property
    @pulumi.getter
    def owners(self) -> Sequence[str]:
        """AMI owners the search was limited to."""
        return pulumi.get(self, "owners")

    @property
    @pulumi.getter(name="sortAscending")
    def sort_ascending(self) -> Optional[bool]:
        """Whether the AMIs were sorted ascending by creation time."""
        return pulumi.get(self, "sort_ascending")
class AwaitableGetAmiIdsResult(GetAmiIdsResult):
    # Awaitable wrapper so the invoke result can be used with ``await``:
    # __await__ yields nothing and immediately returns a plain
    # GetAmiIdsResult built from the same field values.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetAmiIdsResult(
            executable_users=self.executable_users,
            filters=self.filters,
            id=self.id,
            ids=self.ids,
            name_regex=self.name_regex,
            owners=self.owners,
            sort_ascending=self.sort_ascending)
def get_ami_ids(executable_users: Optional[Sequence[str]] = None,
                filters: Optional[Sequence[pulumi.InputType['GetAmiIdsFilterArgs']]] = None,
                name_regex: Optional[str] = None,
                owners: Optional[Sequence[str]] = None,
                sort_ascending: Optional[bool] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAmiIdsResult:
    """
    Use this data source to get a list of AMI IDs matching the specified criteria.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    ubuntu = aws.ec2.get_ami_ids(filters=[aws.ec2.GetAmiIdsFilterArgs(
        name="name",
        values=["ubuntu/images/ubuntu-*-*-amd64-server-*"],
    )],
        owners=["099720109477"])
    ```
    :param Sequence[str] executable_users: Limit search to users with *explicit* launch
           permission on the image. Valid items are the numeric account ID or `self`.
    :param Sequence[pulumi.InputType['GetAmiIdsFilterArgs']] filters: One or more name/value pairs to filter off of. There
           are several valid keys, for a full reference, check out
           [describe-images in the AWS CLI reference][1].
    :param str name_regex: A regex string to apply to the AMI list returned
           by AWS. This allows more advanced filtering not supported from the AWS API.
           This filtering is done locally on what AWS returns, and could have a performance
           impact if the result is large. It is recommended to combine this with other
           options to narrow down the list AWS returns.
    :param Sequence[str] owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).
    :param bool sort_ascending: Used to sort AMIs by creation time.
    """
    pulumi.log.warn("""get_ami_ids is deprecated: aws.getAmiIds has been deprecated in favor of aws.ec2.getAmiIds""")
    # Marshal the arguments into the invoke payload (camelCase keys as the
    # provider expects).
    __args__ = dict()
    __args__['executableUsers'] = executable_users
    __args__['filters'] = filters
    __args__['nameRegex'] = name_regex
    __args__['owners'] = owners
    __args__['sortAscending'] = sort_ascending
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke of the underlying data source; the Awaitable
    # subclass additionally allows use in async contexts.
    __ret__ = pulumi.runtime.invoke('aws:index/getAmiIds:getAmiIds', __args__, opts=opts, typ=GetAmiIdsResult).value
    return AwaitableGetAmiIdsResult(
        executable_users=__ret__.executable_users,
        filters=__ret__.filters,
        id=__ret__.id,
        ids=__ret__.ids,
        name_regex=__ret__.name_regex,
        owners=__ret__.owners,
        sort_ascending=__ret__.sort_ascending)
| sdk/python/pulumi_aws/get_ami_ids.py | 6,381 | A collection of values returned by getAmiIds.
Use this data source to get a list of AMI IDs matching the specified criteria.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
ubuntu = aws.ec2.get_ami_ids(filters=[aws.ec2.GetAmiIdsFilterArgs(
name="name",
values=["ubuntu/images/ubuntu-*-*-amd64-server-*"],
)],
owners=["099720109477"])
```
:param Sequence[str] executable_users: Limit search to users with *explicit* launch
permission on the image. Valid items are the numeric account ID or `self`.
:param Sequence[pulumi.InputType['GetAmiIdsFilterArgs']] filters: One or more name/value pairs to filter off of. There
are several valid keys, for a full reference, check out
[describe-images in the AWS CLI reference][1].
:param str name_regex: A regex string to apply to the AMI list returned
by AWS. This allows more advanced filtering not supported from the AWS API.
This filtering is done locally on what AWS returns, and could have a performance
impact if the result is large. It is recommended to combine this with other
options to narrow down the list AWS returns.
:param Sequence[str] owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).
:param bool sort_ascending: Used to sort AMIs by creation time.
The provider-assigned unique ID for this managed resource.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 1,739 | en | 0.750143 |
# header files
import torch
import torch.nn as nn
import torchvision
import numpy as np
# define network (remember input size: (224 x 224 x 3))
class DenseNet_121(torch.nn.Module):
    """DenseNet-121 for 224x224x3 inputs (growth rate 32, bottleneck 128).

    The four dense stages hold 6/12/24/16 layers and start at 64/128/256/512
    channels respectively; each layer appends 32 feature maps, and a 1x1
    conv + 2x2 average-pool transition halves the channels between stages.
    Submodules are registered under the exact attribute names of the
    original hand-unrolled implementation (``dense_block_<stage>_<layer>``,
    ``transition_block_<stage>``) so state_dicts remain compatible.
    """

    # (stage index, number of dense layers, channels entering the stage)
    _STAGES = ((1, 6, 64), (2, 12, 128), (3, 24, 256), (4, 16, 512))

    def dense_block(self, input_channels):
        """One bottleneck dense layer: 1x1 conv to 128 channels, then a 3x3
        conv producing 32 new feature maps (each conv followed by BN+ReLU)."""
        return torch.nn.Sequential(
            torch.nn.Conv2d(input_channels, 128, kernel_size=1, bias=False),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(128, 32, kernel_size=3, padding=1, bias=False),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(inplace=True)
        )

    def __init__(self, num_classes=2):
        super(DenseNet_121, self).__init__()
        # Stem: 7x7/2 conv + 3x3/2 max-pool (224 -> 56 spatial).
        self.features = torch.nn.Sequential(
            torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        # Register every dense layer and transition in the same order (and
        # under the same names) as the unrolled version.
        for stage, n_layers, in_ch in self._STAGES:
            for layer in range(n_layers):
                name = "dense_block_%d_%d" % (stage, layer + 1)
                setattr(self, name, self.dense_block(in_ch + 32 * layer))
            if stage < 4:
                # Transition: channels after the stage are halved.
                out_ch = in_ch + 32 * n_layers
                setattr(self, "transition_block_%d" % stage, torch.nn.Sequential(
                    torch.nn.Conv2d(out_ch, out_ch // 2, kernel_size=1, bias=False),
                    torch.nn.AvgPool2d(kernel_size=2, stride=2)
                ))
        self.avgpool = torch.nn.AdaptiveAvgPool2d(7)
        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(1024 * 7 * 7, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        for stage, n_layers, _ in self._STAGES:
            # Dense connectivity: concatenate each layer's 32 new maps onto
            # the running feature tensor.
            for layer in range(1, n_layers + 1):
                new_maps = getattr(self, "dense_block_%d_%d" % (stage, layer))(x)
                x = torch.cat([x, new_maps], 1)
            if stage < 4:
                x = getattr(self, "transition_block_%d" % stage)(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
| models/densenet121.py | 9,997 | header files define network (remember input size: (224 x 224 x 3)) define dense block init function dense block 1 (56 x 56 x 64) transition block 1 dense block 2 (28 x 28 x 128) transition block 2 dense block 3 (14 x 14 x 240) transition block 3 dense block 4 (7 x 7 x 512) dense block 1 transition block 1 dense block 2 transition block 2 dense block 3 transition block 3 dense block 4 | 386 | en | 0.501133 |
"""Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
This example adds a numerical version_id to the Versioned class as well
as the ability to see which row is the most "current" vesion.
"""
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import column_property
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned(object):
    """Mixin giving a mapped class copy-on-write versioning.

    Instead of updating a row in place, each flush of a dirty instance
    inserts a brand new row sharing the same ``id`` with an incremented
    ``version_id`` (driven by the ``before_flush`` listener in this module).
    """
    # we have a composite primary key consisting of "id"
    # and "version_id"
    id = Column(Integer, primary_key=True)
    version_id = Column(Integer, primary_key=True, default=1)
    # optional - add a persisted is_current_version column
    is_current_version = Column(Boolean, default=True)
    # optional - add a calculated is_current_version column
    @classmethod
    def __declare_last__(cls):
        """Attach a correlated-subquery column: True when this row carries
        the highest version_id for its id (computed in SQL, not persisted)."""
        alias = cls.__table__.alias()
        cls.calc_is_current_version = column_property(
            select(func.max(alias.c.version_id) == cls.version_id).where(
                alias.c.id == cls.id
            )
        )
    def new_version(self, session):
        """Turn this persistent instance into a pending INSERT of the next
        version, demoting the existing rows' is_current_version flag."""
        # optional - set previous version to have is_current_version=False
        old_id = self.id
        session.query(self.__class__).filter_by(id=old_id).update(
            values=dict(is_current_version=False), synchronize_session=False
        )
        # make us transient (removes persistent
        # identity).
        make_transient(self)
        # increment version_id, which means we have a new PK.
        self.version_id += 1
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    """Before each flush, convert pending UPDATEs of Versioned objects into
    INSERTs of a new version (see Versioned.new_version)."""
    for obj in session.dirty:
        # Only persistent, actually-modified Versioned instances qualify.
        eligible = (
            isinstance(obj, Versioned)
            and session.is_modified(obj, passive=True)
            and attributes.instance_state(obj).has_identity
        )
        if not eligible:
            continue
        # Detach from the old identity, then re-add as a brand new row.
        obj.new_version(session)
        session.add(obj)
# Declarative base, an in-memory SQLite engine (SQL echoed for the demo),
# and a session factory bound to that engine.
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
# example 1, simple versioning
class Example(Versioned, Base):
    """Minimal versioned entity: the composite key from Versioned plus one
    data column."""
    __tablename__ = "example"
    data = Column(String)
Base.metadata.create_all(engine)
session = Session()
# Insert version 1, then modify it: the before_flush hook turns the UPDATE
# into a new INSERT with version_id=2, keeping the original row intact.
e1 = Example(id=1, data="e1")
session.add(e1)
session.commit()
e1.data = "e2"
session.commit()
# Both versions exist; only version 2 is flagged current, and the persisted
# flag agrees with the SQL-computed one.
assert (
    session.query(
        Example.id,
        Example.version_id,
        Example.is_current_version,
        Example.calc_is_current_version,
        Example.data,
    )
    .order_by(Example.id, Example.version_id)
    .all()
    == ([(1, 1, False, False, "e1"), (1, 2, True, True, "e2")])
)
# example 2, versioning with a parent
class Parent(Base):
    """Non-versioned entity pointing at one specific Child version via a
    composite foreign key (child_id, child_version_id)."""
    __tablename__ = "parent"
    id = Column(Integer, primary_key=True)
    child_id = Column(Integer)
    child_version_id = Column(Integer)
    child = relationship("Child", backref=backref("parent", uselist=False))
    __table_args__ = (
        ForeignKeyConstraint(
            ["child_id", "child_version_id"], ["child.id", "child.version_id"]
        ),
    )
class Child(Versioned, Base):
    """Versioned entity that keeps its Parent's composite FK in sync when a
    new version is created."""
    __tablename__ = "child"
    data = Column(String)
    def new_version(self, session):
        """Create the next version and repoint the parent at it, so the
        parent's (child_id, child_version_id) is updated in the same flush."""
        # expire parent's reference to us
        session.expire(self.parent, ["child"])
        # create new version
        Versioned.new_version(self, session)
        # re-add ourselves to the parent. this causes the
        # parent foreign key to be updated also
        self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(id=1, data="c1"))
session.add(p1)
session.commit()
# Editing the child creates child version 2; Child.new_version() re-links
# the parent, so parent.child_version_id follows automatically.
p1.child.data = "c2"
session.commit()
assert p1.child_id == 1
assert p1.child.version_id == 2
# Same invariants as example 1, now for the Child table.
assert (
    session.query(
        Child.id,
        Child.version_id,
        Child.is_current_version,
        Child.calc_is_current_version,
        Child.data,
    )
    .order_by(Child.id, Child.version_id)
    .all()
    == ([(1, 1, False, False, "c1"), (1, 2, True, True, "c2")])
)
| examples/versioned_rows/versioned_rows_w_versionid.py | 4,659 | Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
This example adds a numerical version_id to the Versioned class as well
as the ability to see which row is the most "current" vesion.
we have a composite primary key consisting of "id" and "version_id" optional - add a persisted is_current_version column optional - add a calculated is_current_version column optional - set previous version to have is_current_version=False make us transient (removes persistent identity). increment version_id, which means we have a new PK. make it transient re-add example 1, simple versioning example 2, versioning with a parent expire parent's reference to us create new version re-add ourselves to the parent. this causes the parent foreign key to be updated also | 906 | en | 0.798394 |
import numpy as np
from testbed.cluster_env import LraClusterEnv
from testbed.PolicyGradient_CPO import PolicyGradient
# Global experiment configuration (unused alternatives kept for reference).
params = {
    # 'path': "Dynamic_large_100",
    # 'path': "Dynamic_large_100_limit10",
    # 'number of containers': 81,
    'learning rate': 0.015,
    'nodes per group': 3,
    'number of nodes in the cluster': 27,
    'container_limitation per node': 8
}


def handle_constraint(observation, NUM_NODES):
    """Remap fully-loaded nodes onto under-capacity ones, in place.

    A node is "full" once its row sum exceeds ``container_limitation per
    node`` - 1.  Each full node's row in *observation* is overwritten with
    the row of a not-yet-full node (chosen round-robin over the open
    nodes), and the returned mapping records, per node index, which node's
    state it now mirrors.  Returns ``([], [])`` when every node is full.
    """
    snapshot = observation.copy()
    # TODO: we could add more constraints here
    full = snapshot[:, :].sum(1) > params['container_limitation per node'] - 1
    if sum(full) == NUM_NODES:
        return [], []
    open_nodes = np.where(full == False)[0]
    n_open = len(open_nodes)
    mapping_index = []
    substitutions = 0
    for node in range(NUM_NODES):
        if full[node]:
            # Round-robin over the open nodes keeps substitutions spread out.
            target = open_nodes[substitutions % n_open]
            substitutions += 1
            observation[node] = snapshot[target]
            mapping_index.append(target)
        else:
            observation[node] = snapshot[node]
            mapping_index.append(node)
    return observation, mapping_index
class NineNodeAPI():
    def __init__(self, path_name, surffix, path_surffix):
        """Build the cluster environment and restore the three per-subcluster
        policy networks from their checkpoints.

        :param path_name: checkpoint directory stem; "1"/"2"/"3" is appended
            per subcluster.
        :param surffix: (sic) suffix used to namespace each network's
            variables ("1a"/"2a"/"3a" appended).
        :param path_surffix: (sic) prefix path prepended to path_name.
        """
        self.NUM_NODES = params['number of nodes in the cluster']
        # self.NUM_CONTAINERS = params['number of containers']
        # self.sim = Simulator()
        self.env = LraClusterEnv(num_nodes=self.NUM_NODES)
        # One checkpoint per subcluster policy.
        ckpt_path_1 = path_surffix + path_name + "1" + "/model.ckpt"
        ckpt_path_2 = path_surffix + path_name + "2" + "/model.ckpt"
        ckpt_path_3 = path_surffix + path_name + "3" + "/model.ckpt"
        self.nodes_per_group = int(params['nodes per group'])
        # self.number_of_node_groups = int(self.NUM_NODES / self.nodes_per_group)
        """
        Build Network
        """
        self.n_actions = self.nodes_per_group #: 3 nodes per group
        # Feature vector: per-node app counts + overload flags + totals,
        # plus the remaining-demand summary (yields 29 features here).
        self.n_features = int(self.n_actions * (self.env.NUM_APPS + 1 + self.env.NUM_APPS) + 1 + self.env.NUM_APPS)
        #: 29
        self.RL_1 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '1a')
        self.RL_2 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '2a')
        self.RL_3 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '3a')
        # Load the pre-trained weights; these sessions are inference-only here.
        self.RL_1.restore_session(ckpt_path_1)
        self.RL_2.restore_session(ckpt_path_2)
        self.RL_3.restore_session(ckpt_path_3)
        # Per-subcluster trajectory buffers: current episode and best-so-far.
        self.observation_episode_1, self.action_episode_1, self.reward_episode_1, self.safety_episode_1 = [], [], [], []
        self.observation_optimal_1, self.action_optimal_1, self.reward_optimal_1, self.safety_optimal_1 = [], [], [], []
        self.observation_episode_2, self.action_episode_2, self.reward_episode_2, self.safety_episode_2 = [], [], [], []
        self.observation_optimal_2, self.action_optimal_2, self.reward_optimal_2, self.safety_optimal_2 = [], [], [], []
        self.observation_episode_3, self.action_episode_3, self.reward_episode_3, self.safety_episode_3 = [], [], [], []
        self.observation_optimal_3, self.action_optimal_3, self.reward_optimal_3, self.safety_optimal_3 = [], [], [], []
def batch_data(self, rnd_array):
index_data = []
for i in range(7):
index_data.extend([i] * rnd_array[i])
return rnd_array, index_data
def batch_data_sub(self, rnd_array):
rnd_array = rnd_array.copy()
index_data = []
for i in range(7):
index_data.extend([i] * int(rnd_array[i]))
return rnd_array, index_data
def store_episode_1(self, observations, actions):
self.observation_episode_1.append(observations)
self.action_episode_1.append(actions)
def store_episode_2(self, observations, actions):
self.observation_episode_2.append(observations)
self.action_episode_2.append(actions)
def store_episode_3(self, observations, actions):
self.observation_episode_3.append(observations)
self.action_episode_3.append(actions)
def get_total_tput(self, rnd_array):
# assert sum(rnd_array) == 81
source_batch_, index_data = self.batch_data(rnd_array.astype(int)) # index_data = [0,1,2,0,1,2]
env = LraClusterEnv(num_nodes=self.NUM_NODES)
observation = env.reset().copy() # (9,9)
source_batch = source_batch_.copy()
nodes_per_group = int(params['nodes per group'])
NUM_CONTAINERS = int(sum(rnd_array))
"""
Episode
"""
"""
first layer
"""
source_batch_first = source_batch_.copy()
observation_first_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
for inter_episode_index in range(NUM_CONTAINERS):
appid = index_data[inter_episode_index]
source_batch_first[appid] -= 1
observation_first_layer_copy = observation_first_layer.copy()
observation_first_layer_copy[:, appid] += 1
observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy > 9 * 2, axis=1)
observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)
# observation_first_layer_copy = np.append(observation_first_layer_copy, ((observation_first_layer_copy[:, 2] > 0) * (observation_first_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)
observation_first_layer_copy = np.array(observation_first_layer_copy).reshape(1, -1)
observation_first_layer_copy = np.append(observation_first_layer_copy, appid).reshape(1, -1)
observation_first_layer_copy = np.append(observation_first_layer_copy, np.array(source_batch_first)).reshape(1, -1)
action_1, prob_weights = self.RL_1.choose_action_determine(observation_first_layer_copy.copy())
observation_first_layer[action_1, appid] += 1
# self.store_episode_1(observation_first_layer_copy, action_1)
"""
second layer
"""
observation_second_layer_aggregation = np.empty([0, env.NUM_APPS], int) # 9*20
number_cont_second_layer = []
for second_layer_index in range(nodes_per_group):
rnd_array = observation_first_layer[second_layer_index].copy()
source_batch_second, index_data = self.batch_data_sub(rnd_array)
observation_second_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
NUM_CONTAINERS_second = sum(source_batch_second)
number_cont_second_layer.append(NUM_CONTAINERS_second)
for inter_episode_index in range(NUM_CONTAINERS_second):
appid = index_data[inter_episode_index]
source_batch_second[appid] -= 1
observation_second_layer_copy = observation_second_layer.copy()
observation_second_layer_copy[:, appid] += 1
observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy > 3 * 2, axis=1)
observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)
# observation_second_layer_copy = np.append(observation_second_layer_copy, ((observation_second_layer_copy[:, 2] > 0) * (observation_second_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)
observation_second_layer_copy = np.array(observation_second_layer_copy).reshape(1, -1)
observation_second_layer_copy = np.append(observation_second_layer_copy, appid).reshape(1, -1)
observation_second_layer_copy = np.append(observation_second_layer_copy, np.array(source_batch_second)).reshape(1, -1)
action_2, prob_weights = self.RL_2.choose_action_determine(observation_second_layer_copy.copy())
observation_second_layer[action_2, appid] += 1
# self.store_episode_2(observation_second_layer_copy, action_2)
observation_second_layer_aggregation = np.append(observation_second_layer_aggregation, observation_second_layer, 0)
"""
third layer
"""
observation_third_layer_aggregation = np.empty([0, env.NUM_APPS], int) # 9*20
number_cont_third_layer = []
for third_layer_index in range(nodes_per_group * nodes_per_group):
rnd_array = observation_second_layer_aggregation[third_layer_index].copy()
source_batch_third, index_data = self.batch_data_sub(rnd_array)
observation_third_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
NUM_CONTAINERS_third = sum(source_batch_third)
number_cont_third_layer.append(NUM_CONTAINERS_third)
for inter_episode_index in range(NUM_CONTAINERS_third):
appid = index_data[inter_episode_index]
source_batch_third[appid] -= 1
observation_third_layer_copy = observation_third_layer.copy()
observation_third_layer_copy[:, appid] += 1
observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy > 1 * 2, axis=1)
observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)
# observation_third_layer_copy = np.append(observation_third_layer_copy, ((observation_third_layer_copy[:, 2] > 0) * (observation_third_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)
observation_third_layer_copy = np.array(observation_third_layer_copy).reshape(1, -1)
observation_third_layer_copy = np.append(observation_third_layer_copy, appid).reshape(1, -1)
observation_third_layer_copy = np.append(observation_third_layer_copy, np.array(source_batch_third)).reshape(1, -1)
action_3, prob_weights = self.RL_3.choose_action_determine(observation_third_layer_copy.copy())
observation_third_layer[action_3, appid] += 1
# self.store_episode_3(observation_third_layer_copy, action_3)
observation_third_layer_aggregation = np.append(observation_third_layer_aggregation, observation_third_layer, 0)
"""
After an entire allocation, calculate total throughput, reward
"""
env.state = observation_third_layer_aggregation.copy()
assert sum(sum(env.state)) == NUM_CONTAINERS
assert (env.state.sum(0) == source_batch_).all()
"""
After an entire allocation, calculate total throughput, reward
"""
# state = env.state
# assert sum(sum(self.env.state)) == 81
return env.state
| testbed/SubScheduler.py | 11,316 | parameters set
'path': "Dynamic_large_100", 'path': "Dynamic_large_100_limit10", 'number of containers': 81, TODO: we could add more constraints here >8 bad node index_this_replace = good_index[np.random.randint(length)] self.NUM_CONTAINERS = params['number of containers'] self.sim = Simulator() self.number_of_node_groups = int(self.NUM_NODES / self.nodes_per_group): 3 nodes per group: 29 assert sum(rnd_array) == 81 index_data = [0,1,2,0,1,2] (9,9) observation_first_layer_copy = np.append(observation_first_layer_copy, ((observation_first_layer_copy[:, 2] > 0) * (observation_first_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1) self.store_episode_1(observation_first_layer_copy, action_1) 9*20 observation_second_layer_copy = np.append(observation_second_layer_copy, ((observation_second_layer_copy[:, 2] > 0) * (observation_second_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1) self.store_episode_2(observation_second_layer_copy, action_2) 9*20 observation_third_layer_copy = np.append(observation_third_layer_copy, ((observation_third_layer_copy[:, 2] > 0) * (observation_third_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1) self.store_episode_3(observation_third_layer_copy, action_3) state = env.state assert sum(sum(self.env.state)) == 81 | 1,290 | en | 0.430063 |
# Advent of Code 2021 - Day: 24
# Imports (Always imports data based on the folder and file name)
from aocd import data, submit
def solve(lines):
    """Solve AoC 2021 day 24 analytically from the MONAD source.

    The program is 14 near-identical 18-line chunks; only two constants
    per chunk matter: the x-offset on line 6 and the y-offset on line 16.
    Chunks with a positive x-offset push a digit onto z (a base-26 stack),
    the rest pop, pairing the current digit with an earlier one.
    """
    offsets = []
    for block in range(14):
        x_off = int(lines[block * 18 + 5][6:])
        y_off = int(lines[block * 18 + 15][6:])
        offsets.append((x_off, y_off))
    # Match push/pop chunks with a stack; each pop yields a constraint
    # digit[idx] == digit[partner] + delta.
    pending = []
    constraints = {}
    for idx, (x_off, y_off) in enumerate(offsets):
        if x_off > 0:
            pending.append((idx, y_off))
        else:
            partner, partner_y = pending.pop()
            constraints[idx] = (partner, partner_y + x_off)
    # Clamp both sides of every constraint into the digit range 1..9 to
    # obtain the largest and smallest valid model numbers.
    max_ans = {}
    min_ans = {}
    for idx, (partner, delta) in constraints.items():
        max_ans[idx] = min(9, 9 + delta)
        max_ans[partner] = min(9, 9 - delta)
        min_ans[idx] = max(1, 1 + delta)
        min_ans[partner] = max(1, 1 - delta)
    p1 = "".join(str(max_ans[digit]) for digit in range(14))
    p2 = "".join(str(min_ans[digit]) for digit in range(14))
    print("Star 1:", p1)
    print("Star 2:", p2)
    submit(p1, part="a", day=24, year=2021)
    submit(p2, part="b", day=24, year=2021)
# Solution
def main():
    """Entry point: solve the puzzle on the input fetched by aocd."""
    solve(data.splitlines())
# Call the main function.
if __name__ == '__main__':
main() | Solutions/2021/24.py | 1,514 | Advent of Code 2021 - Day: 24 Imports (Always imports data based on the folder and file name) We need to simply find all the pairs of numbers, i.e. the numbers on lines 6 and 16 and store them. Once getting the pairs we will need a stack and a map to store the pairs, as well constraints. Enumerate helps because we can get the index of the pair at the same time. If (line 6) is positive we need to add line 16 and index to stack, else pop the last element from the stack and add it to constraints. At this point the constraints are stored at the relevant index for which they affect and can be used to find the minimum or maximum element at that index in the answer. Solution Call the main function. | 700 | en | 0.916896 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Meta tests for mappers.
The test checks the output of the swapper to a ground truth DAG (one for each
test/swapper) saved in as a QASM (in `test/python/qasm/`). If they need
to be regenerated, the DAG candidate is compiled and run in a simulator and
the count is checked before being saved. This happens with (in the root
directory):
> python -m test.python.transpiler.test_mappers regenerate
To make a new swapper pass throw all the common tests, create a new class inside the file
`path/to/test_mappers.py` that:
* the class name should start with `Tests...`.
* inheriting from ``SwapperCommonTestCases, QiskitTestCase``
* overwrite the required attribute ``pass_class``
For example::
class TestsSomeSwap(SwapperCommonTestCases, QiskitTestCase):
pass_class = SomeSwap # The pass class
additional_args = {'seed_transpiler': 42} # In case SomeSwap.__init__ requires
# additional arguments
To **add a test for all the swappers**, add a new method ``test_foo``to the
``SwapperCommonTestCases`` class:
* defining the following required ``self`` attributes: ``self.count``,
``self.shots``, ``self.delta``. They are required for the regeneration of the
ground truth.
* use the ``self.assertResult`` assertion for comparing for regeneration of the
ground truth.
* explicitly set a unique ``name`` of the ``QuantumCircuit``, as it it used
for the name of the QASM file of the ground truth.
For example::
def test_a_common_test(self):
self.count = {'000': 512, '110': 512} # The expected count for this circuit
self.shots = 1024 # Shots to run in the backend.
self.delta = 5 # This is delta for the AlmostEqual during
# the count check
coupling_map = [[0, 1], [0, 2]] # The coupling map for this specific test
qr = QuantumRegister(3, 'q') #
cr = ClassicalRegister(3, 'c') # Set the circuit to test
circuit = QuantumCircuit(qr, cr, # and don't forget to put a name
name='some_name') # (it will be used to save the QASM
circuit.h(qr[1]) #
circuit.cx(qr[1], qr[2]) #
circuit.measure(qr, cr) #
result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
pass_manager=self.create_passmanager(coupling_map))
self.assertResult(result, circuit)
```
"""
# pylint: disable=attribute-defined-outside-init
import unittest
import sys
import os
from qiskit import execute
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit, BasicAer
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import BasicSwap, LookaheadSwap, StochasticSwap, SabreSwap
from qiskit.transpiler.passes import SetLayout
from qiskit.transpiler import CouplingMap, Layout
from qiskit.test import QiskitTestCase
DIRNAME = QiskitTestCase._get_resource_path('qasm')
class CommonUtilitiesMixin:
    """Utilities for meta testing.
    Subclasses should redefine the ``pass_class`` argument, with a Swap Mapper
    class.
    Note: This class assumes that the subclass is also inheriting from
    ``QiskitTestCase``, and it uses ``QiskitTestCase`` methods directly.
    """
    # When True (set from __main__ via the 'regenerate' CLI argument), tests
    # rewrite the ground-truth QASM files instead of comparing against them.
    regenerate_expected = False
    # Fixed seeds keep simulation and transpilation deterministic.
    seed_simulator = 42
    seed_transpiler = 42
    # Extra keyword arguments forwarded to pass_class(...) by subclasses.
    additional_args = {}
    # The swap pass under test; must be overridden by subclasses.
    pass_class = None
    def create_passmanager(self, coupling_map, initial_layout=None):
        """Returns a PassManager using self.pass_class(coupling_map, initial_layout)"""
        passmanager = PassManager()
        if initial_layout:
            # Apply the requested virtual->physical layout before swapping.
            passmanager.append(SetLayout(Layout(initial_layout)))
        # pylint: disable=not-callable
        passmanager.append(self.pass_class(CouplingMap(coupling_map), **self.additional_args))
        return passmanager
    def create_backend(self):
        """Returns a Backend."""
        return BasicAer.get_backend('qasm_simulator')
    def generate_ground_truth(self, transpiled_result, filename):
        """Generates the expected result into a file.
        Checks if transpiled_result matches self.counts by running in a backend
        (self.create_backend()). That's saved in a QASM in filename.
        Args:
            transpiled_result (DAGCircuit): The DAGCircuit to execute.
            filename (string): Where the QASM is saved.
        """
        sim_backend = self.create_backend()
        job = execute(transpiled_result, sim_backend, seed_simulator=self.seed_simulator,
                      seed_transpiler=self.seed_transpiler, shots=self.shots)
        # Validate the candidate against the expected counts before it is
        # allowed to become the new ground truth.
        self.assertDictAlmostEqual(self.counts, job.result().get_counts(), delta=self.delta)
        transpiled_result.qasm(formatted=False, filename=filename)
    def assertResult(self, result, circuit):
        """Fetches the QASM in circuit.name file and compares it with result."""
        # Ground-truth files are keyed by test class and circuit name, so
        # each swapper/test pair has its own expected QASM.
        qasm_name = '%s_%s.qasm' % (type(self).__name__, circuit.name)
        filename = os.path.join(DIRNAME, qasm_name)
        if self.regenerate_expected:
            # Run result in backend to test that is valid.
            self.generate_ground_truth(result, filename)
        expected = QuantumCircuit.from_qasm_file(filename)
        self.assertEqual(result, expected)
class SwapperCommonTestCases(CommonUtilitiesMixin):
    """Tests that are run in several mappers.
    The tests here will be run in several mappers. When adding a test, please
    ensure that the test:
    * defines ``self.count``, ``self.shots``, ``self.delta``.
    * uses the ``self.assertResult`` assertion for comparing for regeneration of
      the ground truth.
    * explicitly sets a unique ``name`` of the ``QuantumCircuit``.
    See also ``CommonUtilitiesMixin`` and the module docstring.
    """
    def test_a_cx_to_map(self):
        """A single CX needs to be remapped.
        q0:----------m-----
                     |
        q1:-[H]-(+)--|-m---
             |       | |
        q2:------.---|-|-m-
                     | | |
        c0:----------.-|-|-
        c1:------------.-|-
        c2:--------------.-
        CouplingMap map: [1]<-[0]->[2]
        expected count: '000': 50%
                        '110': 50%
        """
        # counts/shots/delta are only consumed when regenerating the
        # ground-truth QASM (see CommonUtilitiesMixin.generate_ground_truth).
        self.counts = {'000': 512, '110': 512}
        self.shots = 1024
        self.delta = 5
        # qr[1]-qr[2] is not directly coupled, forcing the pass to insert
        # at least one swap.
        coupling_map = [[0, 1], [0, 2]]
        qr = QuantumRegister(3, 'q')
        cr = ClassicalRegister(3, 'c')
        circuit = QuantumCircuit(qr, cr, name='a_cx_to_map')
        circuit.h(qr[1])
        circuit.cx(qr[1], qr[2])
        circuit.measure(qr, cr)
        result = self.create_passmanager(coupling_map).run(circuit)
        self.assertResult(result, circuit)
    def test_initial_layout(self):
        """Using a non-trivial initial_layout.
        q3:----------------m--
        q0:----------m-----|--
                     |     |
        q1:-[H]-(+)--|-m---|--
             |       | |   |
        q2:------.---|-|-m-|--
                     | | | |
        c0:----------.-|-|-|--
        c1:------------.-|-|--
        c2:--------------.-|--
        c3:----------------.--
        CouplingMap map: [1]<-[0]->[2]->[3]
        expected count: '000': 50%
                        '110': 50%
        """
        self.counts = {'0000': 512, '0110': 512}
        self.shots = 1024
        self.delta = 5
        coupling_map = [[0, 1], [0, 2], [2, 3]]
        qr = QuantumRegister(4, 'q')
        cr = ClassicalRegister(4, 'c')
        circuit = QuantumCircuit(qr, cr, name='initial_layout')
        circuit.h(qr[1])
        circuit.cx(qr[1], qr[2])
        circuit.measure(qr, cr)
        # Non-trivial virtual->physical map, applied via SetLayout in
        # create_passmanager before the swap pass runs.
        layout = {qr[3]: 0, qr[0]: 1, qr[1]: 2, qr[2]: 3}
        result = self.create_passmanager(coupling_map, layout).run(circuit)
        self.assertResult(result, circuit)
    def test_handle_measurement(self):
        """Handle measurement correctly.
        q0:--.-----(+)-m-------
             |      |  |
        q1:-(+)-(+)-|--|-m-----
                 |  |    |
        q2:------|--|----|-m---
                 |  |    | |
        q3:-[H]--.--.--|-|-|-m-
                       | | | |
        c0:------------.-|-|-|-
        c1:--------------.-|-|-
        c2:----------------.-|-
        c3:------------------.-
        CouplingMap map: [0]->[1]->[2]->[3]
        expected count: '0000': 50%
                        '1011': 50%
        """
        self.counts = {'1011': 512, '0000': 512}
        self.shots = 1024
        self.delta = 5
        # Linear coupling: qr[3]-qr[1] and qr[3]-qr[0] interactions require
        # swaps, and the final measurements must follow the moved qubits.
        coupling_map = [[0, 1], [1, 2], [2, 3]]
        qr = QuantumRegister(4, 'q')
        cr = ClassicalRegister(4, 'c')
        circuit = QuantumCircuit(qr, cr, name='handle_measurement')
        circuit.h(qr[3])
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[3], qr[1])
        circuit.cx(qr[3], qr[0])
        circuit.measure(qr, cr)
        result = self.create_passmanager(coupling_map).run(circuit)
        self.assertResult(result, circuit)
# Concrete parametrizations: each class runs the common test cases with a
# specific swap pass (plus any pass-specific constructor arguments).
class TestsBasicSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using BasicSwap."""
    pass_class = BasicSwap
class TestsLookaheadSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using LookaheadSwap."""
    pass_class = LookaheadSwap
class TestsStochasticSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using StochasticSwap."""
    pass_class = StochasticSwap
    # Stochastic passes are seeded so the ground-truth QASM is reproducible.
    additional_args = {'seed': 0}
class TestsSabreSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using SabreSwap."""
    pass_class = SabreSwap
    # Seeded for the same reason as StochasticSwap.
    additional_args = {'seed': 0}
if __name__ == '__main__':
    # `python -m test.python.transpiler.test_mappers regenerate` rewrites the
    # ground-truth QASM files instead of comparing against them; the extra
    # argv entry is removed so unittest does not see it.
    if len(sys.argv) >= 2 and sys.argv[1] == 'regenerate':
        CommonUtilitiesMixin.regenerate_expected = True
        del sys.argv[1]
    unittest.main()
| test/python/transpiler/test_mappers.py | 10,583 | Utilities for meta testing.
Subclasses should redefine the ``pass_class`` argument, with a Swap Mapper
class.
Note: This class assumes that the subclass is also inheriting from
``QiskitTestCase``, and it uses ``QiskitTestCase`` methods directly.
Tests that are run in several mappers.
The tests here will be run in several mappers. When adding a test, please
ensure that the test:
* defines ``self.count``, ``self.shots``, ``self.delta``.
* uses the ``self.assertResult`` assertion for comparing for regeneration of
the ground truth.
* explicitly sets a unique ``name`` of the ``QuantumCircuit``.
See also ``CommonUtilitiesMixin`` and the module docstring.
Test SwapperCommonTestCases using BasicSwap.
Test SwapperCommonTestCases using LookaheadSwap.
Test SwapperCommonTestCases using SabreSwap.
Test SwapperCommonTestCases using StochasticSwap.
Fetches the QASM in circuit.name file and compares it with result.
Returns a Backend.
Returns a PassManager using self.pass_class(coupling_map, initial_layout)
Generates the expected result into a file.
Checks if transpiled_result matches self.counts by running in a backend
(self.create_backend()). That's saved in a QASM in filename.
Args:
transpiled_result (DAGCircuit): The DAGCircuit to execute.
filename (string): Where the QASM is saved.
A single CX needs to be remapped.
q0:----------m-----
|
q1:-[H]-(+)--|-m---
| | |
q2:------.---|-|-m-
| | |
c0:----------.-|-|-
c1:------------.-|-
c2:--------------.-
CouplingMap map: [1]<-[0]->[2]
expected count: '000': 50%
'110': 50%
Handle measurement correctly.
q0:--.-----(+)-m-------
| | |
q1:-(+)-(+)-|--|-m-----
| | | |
q2:------|--|--|-|-m---
| | | | |
q3:-[H]--.--.--|-|-|-m-
| | | |
c0:------------.-|-|-|-
c1:--------------.-|-|-
c2:----------------.-|-
c3:------------------.-
CouplingMap map: [0]->[1]->[2]->[3]
expected count: '0000': 50%
'1011': 50%
Using a non-trivial initial_layout.
q3:----------------m--
q0:----------m-----|--
| |
q1:-[H]-(+)--|-m---|--
| | | |
q2:------.---|-|-m-|--
| | | |
c0:----------.-|-|-|--
c1:------------.-|-|--
c2:--------------.-|--
c3:----------------.--
CouplingMap map: [1]<-[0]->[2]->[3]
expected count: '000': 50%
'110': 50%
Meta tests for mappers.
The test checks the output of the swapper to a ground truth DAG (one for each
test/swapper) saved in as a QASM (in `test/python/qasm/`). If they need
to be regenerated, the DAG candidate is compiled and run in a simulator and
the count is checked before being saved. This happens with (in the root
directory):
> python -m test.python.transpiler.test_mappers regenerate
To make a new swapper pass throw all the common tests, create a new class inside the file
`path/to/test_mappers.py` that:
* the class name should start with `Tests...`.
* inheriting from ``SwapperCommonTestCases, QiskitTestCase``
* overwrite the required attribute ``pass_class``
For example::
class TestsSomeSwap(SwapperCommonTestCases, QiskitTestCase):
pass_class = SomeSwap # The pass class
additional_args = {'seed_transpiler': 42} # In case SomeSwap.__init__ requires
# additional arguments
To **add a test for all the swappers**, add a new method ``test_foo``to the
``SwapperCommonTestCases`` class:
* defining the following required ``self`` attributes: ``self.count``,
``self.shots``, ``self.delta``. They are required for the regeneration of the
ground truth.
* use the ``self.assertResult`` assertion for comparing for regeneration of the
ground truth.
* explicitly set a unique ``name`` of the ``QuantumCircuit``, as it it used
for the name of the QASM file of the ground truth.
For example::
def test_a_common_test(self):
self.count = {'000': 512, '110': 512} # The expected count for this circuit
self.shots = 1024 # Shots to run in the backend.
self.delta = 5 # This is delta for the AlmostEqual during
# the count check
coupling_map = [[0, 1], [0, 2]] # The coupling map for this specific test
qr = QuantumRegister(3, 'q') #
cr = ClassicalRegister(3, 'c') # Set the circuit to test
circuit = QuantumCircuit(qr, cr, # and don't forget to put a name
name='some_name') # (it will be used to save the QASM
circuit.h(qr[1]) #
circuit.cx(qr[1], qr[2]) #
circuit.measure(qr, cr) #
result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
pass_manager=self.create_passmanager(coupling_map))
self.assertResult(result, circuit)
```
-*- coding: utf-8 -*- This code is part of Qiskit. (C) Copyright IBM 2017, 2018. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals. pylint: disable=attribute-defined-outside-init pylint: disable=not-callable Run result in backend to test that is valid. | 5,675 | en | 0.645541 |
from ..utils.core import concatenate
class StreamList(list):
    """Class to replace a basic list for streamed products.

    Wraps one product (or a list of products) in a list subclass and
    memoizes the concatenation of all entries in ``aggregate``.
    """

    def __init__(self, product):
        """Initialize from a single product or a list of products.

        Args:
            product: one product, or a list of products.

        Raises:
            ValueError: if more than 10000 products are supplied (the
                cache-filename scheme cannot represent longer streams).
        """
        if isinstance(product, list):
            super(StreamList, self).__init__(product)
        else:
            super(StreamList, self).__init__([product])
        if len(self) > 10000:
            raise ValueError("StreamList can't be longer than 10000 because the filenames for caching are not adequate")
        # Memoized result of aggregate(); populated on first use.
        self._cached_aggregate = None

    def aggregate(self):
        """Concatenate all streamed products, caching the result.

        Bug fix: the cache attribute was initialized but never populated,
        so every call recomputed ``concatenate(self)``. The result is now
        stored on first use and reused afterwards.
        """
        if self._cached_aggregate is None:
            self._cached_aggregate = concatenate(self)
        return self._cached_aggregate
| geeksw/framework/stream.py | 679 | Class to replace a basic list for streamed products | 51 | en | 0.834552 |
"""
Django settings for django_i18n_example project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from os.path import join
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3pykyaqk#*wgwp%$3l@9_az$_9m^-3z3xkbcm!fitj9w!1c802'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Order is important here: LocaleMiddleware must come after
    # SessionMiddleware (it may read the language from the session) and
    # before CommonMiddleware.
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_i18n_example.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, searched before app templates.
        'DIRS': [
            join(BASE_DIR, 'django_i18n_example', 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_i18n_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# SQLite file in the project root; fine for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

# Languages the site is translated into, as (language code, display name)
# pairs; display names are given in the language itself.
LANGUAGES = (
    ('en-us', 'English (US)'),
    # Fixed: the German language calls itself 'Deutsch' ('Deutsche' is the
    # noun/adjective for a German person, not the language name).
    ('de', 'Deutsch'),
    # NOTE(review): Arabic usually self-identifies as 'العربية'; confirm the
    # intended label before changing it.
    ('ar', 'عربى'),
)

# Default language when no other is selected.
LANGUAGE_CODE = 'en-us'

# Where makemessages/compilemessages look for .po/.mo translation files.
LOCALE_PATHS = [
    join(BASE_DIR, 'django_i18n_example', 'locale'),
]

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'

# Enable our static JS file serving
STATICFILES_DIRS = (
    join(BASE_DIR, 'django_i18n_example', "static"),
)
| django_i18n_example/settings.py | 3,560 | Django settings for django_i18n_example project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Order is important here Database https://docs.djangoproject.com/en/3.1/ref/settings/databases Password validation https://docs.djangoproject.com/en/3.1/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.1/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.1/howto/static-files/ Enable our static JS file serving | 1,051 | en | 0.695444 |
#!/usr/bin/python
# coding:utf-8
'''
Created on 2017-04-06
Update on 2017-11-17
Author: Peter/ApacheCN-xy/片刻
GitHub: https://github.com/apachecn/MachineLearning
'''
import sys
from numpy import mat, mean, power
'''
This mapper reads every input line, builds the corresponding floats, then
takes the length of the data and creates a NumPy matrix. It squares all
the values, and finally emits the count, the mean, and the mean of the
squares. The reducer uses these to compute the global mean and variance.
Args:
    file  input data
Return:
'''
def read_input(file):
    for line in file:
        yield line.rstrip() # yield a generator: fetch one value at a time to save memory.
# NOTE(review): `input` shadows the builtin of the same name; harmless in
# this short script but worth renaming.
input = read_input(sys.stdin) # generator over the input data lines
input = [float(line) for line in input] # convert each line to float
numInputs = len(input) # number of values, i.e. lines in the input file
input = mat(input) # convert the list into a matrix
sqInput = power(input, 2) # square every element of the matrix
# Emit: the number of values, their mean, and the mean of their squares.
# The first line goes to standard output, i.e. it becomes the reducer's input.
# The second line goes to standard error as a health report to the master
# node, showing this worker is still alive.
# NOTE: reporting to stderr is a good habit -- Hadoop kills a streaming task
# that produces no status output for 10 minutes.
print("%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput))) # compute the means
print("map report: still alive", file=sys.stderr)
| ML-in-Action/MachineLearning-dev/src/py3.x/ML/15.BigData_MapReduce/mrMeanMapper.py | 1,737 | Created on 2017-04-06
Update on 2017-11-17
Author: Peter/ApacheCN-xy/片刻
GitHub: https://github.com/apachecn/MachineLearning
!/usr/bin/python coding:utf-8 返回一个 yield 迭代器,每次获取下一个值,节约内存。 创建一个输入的数据行的列表list 将得到的数据转化为 float 类型 获取数据的个数,即输入文件的数据的行数 将 List 转换为矩阵 将矩阵的数据分别求 平方,即 2次方 输出 数据的个数,n个数据的均值,n个数据平方之后的均值 第一行是标准输出,也就是reducer的输出 第二行识标准错误输出,即对主节点作出的响应报告,表明本节点工作正常。 【这不就是面试的装逼重点吗?如何设计监听架构细节】注意:一个好的习惯是想标准错误输出发送报告。如果某任务10分钟内没有报告输出,则将被Hadoop中止。 计算均值 | 443 | zh | 0.954378 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 4 17:07:18 2016
@author: sshank
"""
# Print out the required annotations at the moment... change to put into MySQL
#
# Reads a parsed PAML RST file and an aligned FASTA (descendent vs PDB
# sequence), classifies each descendent codon as unchanged (0), synonymous
# change (1) or nonsynonymous change (2), then projects those annotations
# onto the PDB sequence positions via the alignment.
from Bio.Seq import Seq
from Bio import AlignIO
from Bio.SeqRecord import SeqRecord
from argparse import ArgumentParser
parser = ArgumentParser()
rst_help = 'Path to parsed RST file (created with parse_rst.py).'
parser.add_argument('-r', '--rst', metavar='RST', help=rst_help, dest='rst')
input_help = 'Path to input fasta file (aligned).'
parser.add_argument('-i', '--input', metavar='INPUT', help=input_help, dest='input')
args = parser.parse_args()
rst_filename = args.rst
input_filename = args.input
descendent_sequence = ''
# NOTE(review): ancestral_sequence is never appended to or used below.
ancestral_sequence = ''
descendent_annotations = []
descendent_changes = []
# Walk the parsed RST rows; columns 6 and 16 are presumably the descendent
# and ancestral codons as emitted by parse_rst.py -- confirm against that
# script's output format.
with open(rst_filename, 'r') as file:
    for line in file:
        split = line.split()
        descendent_codon = split[6]
        ancestral_codon = split[16]
        if descendent_codon != '---':
            descendent_amino_acid = Seq(descendent_codon).translate()
            descendent_sequence += str(descendent_amino_acid)
            if descendent_codon == ancestral_codon or ancestral_codon == '---':
                # No change or missing information
                descendent_annotations.append(0)
                descendent_changes.append('-')
            else:
                ancestral_amino_acid = Seq(ancestral_codon).translate()
                if descendent_amino_acid == ancestral_amino_acid:
                    # Synonymous change
                    descendent_annotations.append(1)
                    change = ancestral_codon + '->' + descendent_codon
                    descendent_changes.append(change)
                else:
                    # Nonsynonymous change
                    descendent_annotations.append(2)
                    change = str(ancestral_amino_acid) + '->' + str(descendent_amino_acid)
                    descendent_changes.append(change)
# NOTE(review): taed_descendent is built but never used afterwards.
taed_descendent = SeqRecord(descendent_sequence, id='taed_descendent')
pdb_annotations = []
pdb_changes = []
# Project annotations onto PDB positions. The alignment is assumed to hold
# exactly two rows: (descendent, pdb). d_index/p_index track how far each
# ungapped sequence has advanced.
alignment = AlignIO.read(input_filename, 'fasta')
d_index = 0
p_index = 0
for k in range(alignment.get_alignment_length()):
    descendent_amino_acid, pdb_amino_acid = alignment[:, k]
    if pdb_amino_acid != '-' and descendent_amino_acid != '-':
        # There is a chance that something happened... append and increment both
        pdb_annotations.append(descendent_annotations[d_index])
        pdb_changes.append(descendent_changes[d_index])
        p_index += 1
        d_index += 1
    else:
        # Gapped column: a PDB residue with no descendent counterpart gets
        # annotation 0; advance only the indices whose sequence has a residue.
        if pdb_amino_acid != '-':
            pdb_annotations.append(0)
            pdb_changes.append('-')
            p_index += 1
        if descendent_amino_acid != '-':
            d_index += 1
# Emit the annotation codes and the change strings (quoted, comma-separated)
# for manual insertion into MySQL.
print(','.join([str(i) for i in pdb_annotations]))
print('\n')
print("'" + "','".join([str(i) for i in pdb_changes])+ "'")
# -*- coding: utf-8 -*-
# create_pdb_annotations.py
# Created on Tue Oct 4 17:07:18 2016
# @author: sshank
# Print out the required annotations at the moment... change to put into MySQL.
# Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from .. import mparser
from .. import environment
from .. import coredata
from .. import dependencies
from .. import mlog
from .. import build
from .. import optinterpreter
from .. import compilers
from .. import envconfig
from ..wrap import wrap, WrapMode
from .. import mesonlib
from ..mesonlib import MesonBugException, HoldableObject, FileMode, MachineChoice, OptionKey, listify, extract_as_list, has_path_sep
from ..programs import ExternalProgram, NonExistingExternalProgram
from ..dependencies import Dependency
from ..depfile import DepFile
from ..interpreterbase import ContainerTypeInfo, InterpreterBase, KwargInfo, typed_kwargs, typed_pos_args
from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, noArgsFlattening, noSecondLevelHolderResolving, unholder_return
from ..interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from ..interpreterbase import Disabler, disablerIfNotFound
from ..interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs, FeatureDeprecatedKwargs
from ..interpreterbase import ObjectHolder
from ..modules import ExtensionModule, ModuleObject, MutableModuleObject, NewExtensionModule, NotFoundExtensionModule
from ..cmake import CMakeInterpreter
from ..backend.backends import ExecutableSerialisation
from . import interpreterobjects as OBJ
from . import compiler as compilerOBJ
from .mesonmain import MesonMain
from .dependencyfallbacks import DependencyFallbacksHolder
from .interpreterobjects import (
SubprojectHolder,
Test,
RunProcess,
extract_required_kwarg,
extract_search_dirs,
NullSubprojectInterpreter,
)
from .type_checking import (
COMMAND_KW,
CT_BUILD_ALWAYS,
CT_BUILD_ALWAYS_STALE,
CT_BUILD_BY_DEFAULT,
CT_INPUT_KW,
CT_INSTALL_DIR_KW,
CT_OUTPUT_KW,
DEFAULT_OPTIONS,
DEPENDS_KW,
DEPEND_FILES_KW,
DEPFILE_KW,
DISABLER_KW,
ENV_KW,
ENV_METHOD_KW,
ENV_SEPARATOR_KW,
INSTALL_KW,
INSTALL_MODE_KW,
CT_INSTALL_TAG_KW,
INSTALL_TAG_KW,
LANGUAGE_KW,
NATIVE_KW, OVERRIDE_OPTIONS_KW,
REQUIRED_KW,
NoneType,
in_set_validator,
env_convertor_with_method
)
from . import primitives as P_OBJ
from pathlib import Path
import os
import shutil
import uuid
import re
import stat
import collections
import typing as T
import textwrap
import importlib
if T.TYPE_CHECKING:
import argparse
from typing_extensions import Literal
from . import kwargs
from ..backend.backends import Backend
from ..interpreterbase.baseobjects import InterpreterObject, TYPE_var, TYPE_kwargs
from ..programs import OverrideProgram
# Input source types passed to Targets
# NOTE(review): build.GeneratedList appears twice in each union below —
# harmless for typing purposes, but redundant.
SourceInputs = T.Union[mesonlib.File, build.GeneratedList, build.BuildTarget, build.BothLibraries,
                       build.CustomTargetIndex, build.CustomTarget, build.GeneratedList,
                       build.ExtractedObjects, str]
# Input source types passed to the build.Target classes
SourceOutputs = T.Union[mesonlib.File, build.GeneratedList,
                        build.BuildTarget, build.CustomTargetIndex, build.CustomTarget,
                        build.ExtractedObjects, build.GeneratedList, build.StructuredSources]
def _project_version_validator(value: T.Union[T.List, str, mesonlib.File, None]) -> T.Optional[str]:
if isinstance(value, list):
if len(value) != 1:
return 'when passed as array must have a length of 1'
elif not isinstance(value[0], mesonlib.File):
return 'when passed as array must contain a File'
return None
def stringifyUserArguments(args: T.List[T.Any], quote: bool = False) -> str:
    """Render a user-supplied value as meson-language source text.

    Strings are single-quoted only when nested (or `quote` is set); lists and
    dicts recurse with quoting enabled for their elements.
    """
    if isinstance(args, list):
        body = ', '.join(stringifyUserArguments(item, True) for item in args)
        return '[%s]' % body
    if isinstance(args, dict):
        pairs = ['{} : {}'.format(stringifyUserArguments(key, True),
                                  stringifyUserArguments(val, True))
                 for key, val in args.items()]
        return '{%s}' % ', '.join(pairs)
    # bool must be tested before int since bool is an int subclass.
    if isinstance(args, bool):
        return 'true' if args else 'false'
    if isinstance(args, int):
        return str(args)
    if isinstance(args, str):
        return f"'{args}'" if quote else args
    raise InvalidArguments('Function accepts only strings, integers, bools, lists, dictionaries and lists thereof.')
class Summary:
    """Accumulates the key/value sections registered via summary() and
    pretty-prints them at the end of configuration."""

    def __init__(self, project_name: str, project_version: str):
        self.project_name = project_name
        self.project_version = project_version
        # section name -> {key -> (formatted value list, list separator)}
        self.sections = collections.defaultdict(dict)
        # Longest key seen; used to align the ':' column when dumping.
        self.max_key_len = 0

    def add_section(self, section: str, values: T.Dict[str, T.Any], bool_yn: bool,
                    list_sep: T.Optional[str], subproject: str) -> None:
        """Add one summary section; raises on duplicate keys or bad value types."""
        for k, v in values.items():
            if k in self.sections[section]:
                raise InterpreterException(f'Summary section {section!r} already have key {k!r}')
            formatted_values = []
            for i in listify(v):
                if isinstance(i, bool) and bool_yn:
                    # Coloured YES/NO rendering when bool_yn was requested.
                    formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
                elif isinstance(i, (str, int, bool)):
                    formatted_values.append(str(i))
                elif isinstance(i, (ExternalProgram, Dependency)):
                    FeatureNew.single_use('dependency or external program in summary', '0.57.0', subproject)
                    formatted_values.append(i.summary_value())
                elif isinstance(i, coredata.UserOption):
                    FeatureNew.single_use('feature option in summary', '0.58.0', subproject)
                    formatted_values.append(i.printable_value())
                else:
                    m = 'Summary value in section {!r}, key {!r}, must be string, integer, boolean, dependency or external program'
                    raise InterpreterException(m.format(section, k))
            self.sections[section][k] = (formatted_values, list_sep)
            self.max_key_len = max(self.max_key_len, len(k))

    def dump(self):
        """Print the whole summary: project header, then each section with
        keys padded so all values start in the same column."""
        mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
        for section, values in self.sections.items():
            mlog.log('')  # newline
            if section:
                mlog.log(' ', mlog.bold(section))
            for k, v in values.items():
                v, list_sep = v
                padding = self.max_key_len - len(k)
                end = ' ' if v else ''
                mlog.log(' ' * 3, k + ' ' * padding + ':', end=end)
                indent = self.max_key_len + 6
                self.dump_value(v, list_sep, indent)
        mlog.log('')  # newline

    def dump_value(self, arr, list_sep, indent):
        """Print one value list, word-wrapping to the terminal width when a
        list separator is given; otherwise one entry per line."""
        lines_sep = '\n' + ' ' * indent
        if list_sep is None:
            mlog.log(*arr, sep=lines_sep)
            return
        max_len = shutil.get_terminal_size().columns
        line = []
        line_len = indent
        lines_sep = list_sep.rstrip() + lines_sep
        for v in arr:
            v_len = len(v) + len(list_sep)
            # Flush the current line before it would overflow the terminal.
            if line and line_len + v_len > max_len:
                mlog.log(*line, sep=list_sep, end=lines_sep)
                line_len = indent
                line = []
            line.append(v)
            line_len += v_len
        mlog.log(*line, sep=list_sep)
known_library_kwargs = (
build.known_shlib_kwargs |
build.known_stlib_kwargs
)
known_build_target_kwargs = (
known_library_kwargs |
build.known_exe_kwargs |
build.known_jar_kwargs |
{'target_type'}
)
TEST_KWARGS: T.List[KwargInfo] = [
KwargInfo('args', ContainerTypeInfo(list, (str, mesonlib.File, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)),
listify=True, default=[]),
KwargInfo('should_fail', bool, default=False),
KwargInfo('timeout', int, default=30),
KwargInfo('workdir', (str, NoneType), default=None,
validator=lambda x: 'must be an absolute path' if not os.path.isabs(x) else None),
KwargInfo('protocol', str,
default='exitcode',
validator=in_set_validator({'exitcode', 'tap', 'gtest', 'rust'}),
since_values={'gtest': '0.55.0', 'rust': '0.57.0'}),
KwargInfo('priority', int, default=0, since='0.52.0'),
# TODO: env needs reworks of the way the environment variable holder itself works probably
ENV_KW,
DEPENDS_KW.evolve(since='0.46.0'),
KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']), # yes, a list of empty string
KwargInfo('verbose', bool, default=False, since='0.62.0'),
]
permitted_dependency_kwargs = {
'allow_fallback',
'cmake_args',
'cmake_module_path',
'cmake_package_version',
'components',
'default_options',
'fallback',
'include_type',
'language',
'main',
'method',
'modules',
'native',
'not_found_message',
'optional_modules',
'private_headers',
'required',
'static',
'version',
}
implicit_check_false_warning = """You should add the boolean check kwarg to the run_command call.
It currently defaults to false,
but it will default to true in future releases of meson.
See also: https://github.com/mesonbuild/meson/issues/9300"""
class Interpreter(InterpreterBase, HoldableObject):
    def __init__(
                self,
                _build: build.Build,
                backend: T.Optional[Backend] = None,
                subproject: str = '',
                subdir: str = '',
                subproject_dir: str = 'subprojects',
                default_project_options: T.Optional[T.Dict[OptionKey, str]] = None,
                mock: bool = False,
                ast: T.Optional[mparser.CodeBlockNode] = None,
                is_translated: bool = False,
                user_defined_options: T.Optional['argparse.Namespace'] = None,
            ) -> None:
        """Create the interpreter for one (sub)project's build definitions.

        `mock` skips loading and parsing of any build file entirely; `ast`
        supplies a pre-parsed tree instead of reading meson.build (used for
        translated, e.g. CMake, subprojects, where `is_translated` also keeps
        the generated file out of the regen dependency list).
        """
        super().__init__(_build.environment.get_source_dir(), subdir, subproject)
        self.active_projectname = ''
        self.build = _build
        self.environment = self.build.environment
        self.coredata = self.environment.get_coredata()
        self.backend = backend
        self.summary: T.Dict[str, 'Summary'] = {}
        self.modules: T.Dict[str, NewExtensionModule] = {}
        # Subproject directory is usually the name of the subproject, but can
        # be different for dependencies provided by wrap files.
        self.subproject_directory_name = subdir.split(os.path.sep)[-1]
        self.subproject_dir = subproject_dir
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        if not mock and ast is None:
            self.load_root_meson_file()
            self.sanity_check_ast()
        elif ast is not None:
            self.ast = ast
            self.sanity_check_ast()
        self.builtin.update({'meson': MesonMain(self.build, self)})
        self.generators: T.List[build.Generator] = []
        self.processed_buildfiles = set() # type: T.Set[str]
        self.project_args_frozen = False
        self.global_args_frozen = False  # implies self.project_args_frozen
        self.subprojects: T.Dict[str, SubprojectHolder] = {}
        self.subproject_stack: T.List[str] = []
        self.configure_file_outputs: T.Dict[str, int] = {}
        # Passed from the outside, only used in subprojects.
        if default_project_options:
            self.default_project_options = default_project_options.copy()
        else:
            self.default_project_options = {}
        self.project_default_options: T.Dict[OptionKey, str] = {}
        self.build_func_dict()
        self.build_holder_map()
        self.user_defined_options = user_defined_options
        # build_def_files needs to be defined before parse_project is called
        #
        # For non-meson subprojects, we'll be using the ast. Even if it does
        # exist we don't want to add a dependency on it, it's autogenerated
        # from the actual build files, and is just for reference.
        self.build_def_files: mesonlib.OrderedSet[str] = mesonlib.OrderedSet()
        build_filename = os.path.join(self.subdir, environment.build_filename)
        if not is_translated:
            self.build_def_files.add(build_filename)
        if not mock:
            self.parse_project()
        self._redetect_machines()
def __getnewargs_ex__(self) -> T.Tuple[T.Tuple[object], T.Dict[str, object]]:
raise MesonBugException('This class is unpicklable')
    def _redetect_machines(self) -> None:
        """Refresh machine info and the *_machine builtin objects."""
        # Re-initialize machine descriptions. We can do a better job now because we
        # have the compilers needed to gain more knowledge, so wipe out old
        # inference and start over.
        machines = self.build.environment.machines.miss_defaulting()
        machines.build = environment.detect_machine_info(self.coredata.compilers.build)
        self.build.environment.machines = machines.default_missing()
        assert self.build.environment.machines.build.cpu is not None
        assert self.build.environment.machines.host.cpu is not None
        assert self.build.environment.machines.target.cpu is not None

        # Expose the refreshed descriptions as the build/host/target_machine
        # objects available in build files.
        self.builtin['build_machine'] = \
            OBJ.MachineHolder(self.build.environment.machines.build, self)
        self.builtin['host_machine'] = \
            OBJ.MachineHolder(self.build.environment.machines.host, self)
        self.builtin['target_machine'] = \
            OBJ.MachineHolder(self.build.environment.machines.target, self)
    def build_func_dict(self) -> None:
        """Register every meson-language builtin function with its Python
        implementation.  The extra 'exception' entry exists only when running
        under the unit-test harness (MESON_UNIT_TEST)."""
        self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
                           'add_global_link_arguments': self.func_add_global_link_arguments,
                           'add_languages': self.func_add_languages,
                           'add_project_arguments': self.func_add_project_arguments,
                           'add_project_link_arguments': self.func_add_project_link_arguments,
                           'add_test_setup': self.func_add_test_setup,
                           'alias_target': self.func_alias_target,
                           'assert': self.func_assert,
                           'benchmark': self.func_benchmark,
                           'both_libraries': self.func_both_lib,
                           'build_target': self.func_build_target,
                           'configuration_data': self.func_configuration_data,
                           'configure_file': self.func_configure_file,
                           'custom_target': self.func_custom_target,
                           'declare_dependency': self.func_declare_dependency,
                           'dependency': self.func_dependency,
                           'disabler': self.func_disabler,
                           'environment': self.func_environment,
                           'error': self.func_error,
                           'executable': self.func_executable,
                           'files': self.func_files,
                           'find_library': self.func_find_library,
                           'find_program': self.func_find_program,
                           'generator': self.func_generator,
                           'get_option': self.func_get_option,
                           'get_variable': self.func_get_variable,
                           'gettext': self.func_gettext,
                           'import': self.func_import,
                           'include_directories': self.func_include_directories,
                           'install_data': self.func_install_data,
                           'install_emptydir': self.func_install_emptydir,
                           'install_headers': self.func_install_headers,
                           'install_man': self.func_install_man,
                           'install_subdir': self.func_install_subdir,
                           'install_symlink': self.func_install_symlink,
                           'is_disabler': self.func_is_disabler,
                           'is_variable': self.func_is_variable,
                           'jar': self.func_jar,
                           'join_paths': self.func_join_paths,
                           'library': self.func_library,
                           'message': self.func_message,
                           'option': self.func_option,
                           'project': self.func_project,
                           'range': self.func_range,
                           'run_command': self.func_run_command,
                           'run_target': self.func_run_target,
                           'set_variable': self.func_set_variable,
                           'structured_sources': self.func_structured_sources,
                           'subdir': self.func_subdir,
                           'shared_library': self.func_shared_lib,
                           'shared_module': self.func_shared_module,
                           'static_library': self.func_static_lib,
                           'subdir_done': self.func_subdir_done,
                           'subproject': self.func_subproject,
                           'summary': self.func_summary,
                           'test': self.func_test,
                           'unset_variable': self.func_unset_variable,
                           'vcs_tag': self.func_vcs_tag,
                           'warning': self.func_warning,
                           })
        if 'MESON_UNIT_TEST' in os.environ:
            self.funcs.update({'exception': self.func_exception})
    def build_holder_map(self) -> None:
        '''
        Build a mapping of `HoldableObject` types to their corresponding
        `ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically
        holderify all returned values from methods and functions.
        '''
        self.holder_map.update({
            # Primitives
            list: P_OBJ.ArrayHolder,
            dict: P_OBJ.DictHolder,
            int: P_OBJ.IntegerHolder,
            bool: P_OBJ.BooleanHolder,
            str: P_OBJ.StringHolder,
            P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder,

            # Meson types
            mesonlib.File: OBJ.FileHolder,
            build.SharedLibrary: OBJ.SharedLibraryHolder,
            build.StaticLibrary: OBJ.StaticLibraryHolder,
            build.BothLibraries: OBJ.BothLibrariesHolder,
            build.SharedModule: OBJ.SharedModuleHolder,
            build.Executable: OBJ.ExecutableHolder,
            build.Jar: OBJ.JarHolder,
            build.CustomTarget: OBJ.CustomTargetHolder,
            build.CustomTargetIndex: OBJ.CustomTargetIndexHolder,
            build.Generator: OBJ.GeneratorHolder,
            build.GeneratedList: OBJ.GeneratedListHolder,
            build.ExtractedObjects: OBJ.GeneratedObjectsHolder,
            build.RunTarget: OBJ.RunTargetHolder,
            build.AliasTarget: OBJ.AliasTargetHolder,
            build.Headers: OBJ.HeadersHolder,
            build.Man: OBJ.ManHolder,
            build.EmptyDir: OBJ.EmptyDirHolder,
            build.Data: OBJ.DataHolder,
            build.SymlinkData: OBJ.SymlinkDataHolder,
            build.InstallDir: OBJ.InstallDirHolder,
            build.IncludeDirs: OBJ.IncludeDirsHolder,
            build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder,
            build.StructuredSources: OBJ.StructuredSourcesHolder,
            compilers.RunResult: compilerOBJ.TryRunResultHolder,
            dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder,
            coredata.UserFeatureOption: OBJ.FeatureOptionHolder,
            envconfig.MachineInfo: OBJ.MachineHolder,
            build.ConfigurationData: OBJ.ConfigurationDataHolder,
        })

        '''
        Build a mapping of `HoldableObject` base classes to their
        corresponding `ObjectHolder`s. The difference to `self.holder_map`
        is that the keys here define an upper bound instead of requiring an
        exact match.

        The mappings defined here are only used when there was no direct hit
        found in `self.holder_map`.
        '''
        self.bound_holder_map.update({
            dependencies.Dependency: OBJ.DependencyHolder,
            ExternalProgram: OBJ.ExternalProgramHolder,
            compilers.Compiler: compilerOBJ.CompilerHolder,
            ModuleObject: OBJ.ModuleObjectHolder,
            MutableModuleObject: OBJ.MutableModuleObjectHolder,
        })
def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None:
'''
Adds one additional mapping to the `holder_map`.
The intended use for this function is in the `initialize` method of
modules to register custom object holders.
'''
self.holder_map.update({
held_type: holder_type
})
    def process_new_values(self, invalues: T.List[T.Union[TYPE_var, ExecutableSerialisation]]) -> None:
        """Fold values returned from a module method into the build state,
        dispatching on type (targets, install scripts, data, tests, ...).

        Raises InterpreterException for ObjectHolders (modules must return
        bare values) and for any unrecognised type.
        """
        invalues = listify(invalues)
        for v in invalues:
            if isinstance(v, ObjectHolder):
                raise InterpreterException('Modules must not return ObjectHolders')
            if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
                self.add_target(v.name, v)
            elif isinstance(v, list):
                # Nested lists are flattened recursively.
                self.process_new_values(v)
            elif isinstance(v, ExecutableSerialisation):
                v.subproject = self.subproject
                self.build.install_scripts.append(v)
            elif isinstance(v, build.Data):
                self.build.data.append(v)
            elif isinstance(v, build.SymlinkData):
                self.build.symlinks.append(v)
            elif isinstance(v, dependencies.InternalDependency):
                # FIXME: This is special cased and not ideal:
                # The first source is our new VapiTarget, the rest are deps
                self.process_new_values(v.sources[0])
            elif isinstance(v, build.InstallDir):
                self.build.install_dirs.append(v)
            elif isinstance(v, Test):
                self.build.tests.append(v)
            elif isinstance(v, (int, str, bool, Disabler, ObjectHolder, build.GeneratedList,
                                ExternalProgram, build.ConfigurationData)):
                # Plain values need no build-state bookkeeping.
                pass
            else:
                raise InterpreterException(f'Module returned a value of unknown type {v!r}.')
    def get_build_def_files(self) -> mesonlib.OrderedSet[str]:
        """Return the files whose change must trigger a reconfigure."""
        return self.build_def_files
    def add_build_def_file(self, f: mesonlib.FileOrString) -> None:
        """Record `f` as a build-definition file (triggers reconfigure on change)."""
        # Use relative path for files within source directory, and absolute path
        # for system files. Skip files within build directory. Also skip not regular
        # files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this
        # is especially important to convert '/' to '\' on Windows.
        if isinstance(f, mesonlib.File):
            if f.is_built:
                return
            f = os.path.normpath(f.relative_name())
        elif os.path.isfile(f) and not f.startswith('/dev'):
            srcdir = Path(self.environment.get_source_dir())
            builddir = Path(self.environment.get_build_dir())
            try:
                f_ = Path(f).resolve()
            except OSError:
                f_ = Path(f)
                s = f_.stat()
                if (hasattr(s, 'st_file_attributes') and
                        s.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT != 0 and
                        s.st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK):
                    # This is a Windows Store link which we can't
                    # resolve, so just do our best otherwise.
                    f_ = f_.parent.resolve() / f_.name
                else:
                    raise
            if builddir in f_.parents:
                return
            if srcdir in f_.parents:
                f_ = f_.relative_to(srcdir)
            f = str(f_)
        else:
            # Not a regular file (or a /dev pseudo-file): nothing to track.
            return
        if f not in self.build_def_files:
            self.build_def_files.add(f)
    def get_variables(self) -> T.Dict[str, InterpreterObject]:
        """Return the interpreter's current variable scope."""
        return self.variables
    def check_stdlibs(self) -> None:
        """Resolve per-language stdlib dependencies declared in machine-file
        properties, forcing the declared fallback for each machine."""
        machine_choices = [MachineChoice.HOST]
        if self.coredata.is_cross_build():
            machine_choices.append(MachineChoice.BUILD)
        for for_machine in machine_choices:
            props = self.build.environment.properties[for_machine]
            for l in self.coredata.compilers[for_machine].keys():
                try:
                    di = mesonlib.stringlistify(props.get_stdlib(l))
                except KeyError:
                    # No <lang>_stdlib property declared; nothing to do.
                    continue
                if len(di) == 1:
                    FeatureNew.single_use('stdlib without variable name', '0.56.0', self.subproject, location=self.current_node)
                kwargs = {'native': for_machine is MachineChoice.BUILD,
                          }
                name = l + '_stdlib'
                df = DependencyFallbacksHolder(self, [name])
                df.set_fallback(di)
                dep = df.lookup(kwargs, force_fallback=True)
                self.build.stdlibs[for_machine][l] = dep
def _import_module(self, modname: str, required: bool) -> NewExtensionModule:
if modname in self.modules:
return self.modules[modname]
try:
module = importlib.import_module('mesonbuild.modules.' + modname)
except ImportError:
if required:
raise InvalidArguments(f'Module "{modname}" does not exist')
ext_module = NotFoundExtensionModule()
else:
ext_module = module.initialize(self)
assert isinstance(ext_module, (ExtensionModule, NewExtensionModule))
self.build.modules.append(modname)
self.modules[modname] = ext_module
return ext_module
    @typed_pos_args('import', str)
    @typed_kwargs(
        'import',
        REQUIRED_KW.evolve(since='0.59.0'),
        DISABLER_KW.evolve(since='0.59.0'),
    )
    @disablerIfNotFound
    def func_import(self, node: mparser.BaseNode, args: T.Tuple[str],
                    kwargs: 'kwargs.FuncImportModule') -> T.Union[ExtensionModule, NewExtensionModule, NotFoundExtensionModule]:
        """Implement import(): load a Meson extension module by name.

        `unstable-foo` first tries the stabilised `foo` module and falls back
        to the `unstable_foo` python module, warning either way.
        """
        modname = args[0]
        disabled, required, _ = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            return NotFoundExtensionModule()

        if modname.startswith('unstable-'):
            plainname = modname.split('-', 1)[1]
            try:
                # check if stable module exists
                mod = self._import_module(plainname, required)
                # XXX: this is actually not helpful, since it doesn't do a version check
                mlog.warning(f'Module {modname} is now stable, please use the {plainname} module instead.')
                return mod
            except InvalidArguments:
                mlog.warning(f'Module {modname} has no backwards or forwards compatibility and might not exist in future releases.', location=node)
                modname = 'unstable_' + plainname
        return self._import_module(modname, required)
@typed_pos_args('files', varargs=str)
@noKwargs
def func_files(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> T.List[mesonlib.File]:
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args[0]]
    # Used by declare_dependency() and pkgconfig.generate()
    def extract_variables(self, kwargs, argname='variables', list_new=False, dict_new=False):
        """Normalise the `variables` kwarg to a str -> str mapping.

        Accepts either a dict or a list of 'key=value' strings; validates
        that every name is non-empty, whitespace-free, and that every value
        is a non-empty string.  `list_new`/`dict_new` emit FeatureNew
        notices for the corresponding input form.
        """
        variables = kwargs.get(argname, {})
        if isinstance(variables, dict):
            if dict_new and variables:
                FeatureNew.single_use(f'{argname} as dictionary', '0.56.0', self.subproject, location=self.current_node)
        else:
            varlist = mesonlib.stringlistify(variables)
            if list_new:
                FeatureNew.single_use(f'{argname} as list of strings', '0.56.0', self.subproject, location=self.current_node)
            variables = collections.OrderedDict()
            for v in varlist:
                try:
                    (key, value) = v.split('=', 1)
                except ValueError:
                    raise InterpreterException(f'Variable {v!r} must have a value separated by equals sign.')
                variables[key.strip()] = value.strip()
        for k, v in variables.items():
            if not k or not v:
                raise InterpreterException('Empty variable name or value')
            if any(c.isspace() for c in k):
                raise InterpreterException(f'Invalid whitespace in variable name "{k}"')
            if not isinstance(v, str):
                raise InterpreterException('variables values must be strings.')
        return variables
    @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
    @FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
    @FeatureNewKwargs('declare_dependency', '0.62.0', ['d_module_versions', 'd_import_dirs'])
    @permittedKwargs({'include_directories', 'link_with', 'sources', 'dependencies',
                      'compile_args', 'link_args', 'link_whole', 'version',
                      'variables', 'd_module_versions', 'd_import_dirs'})
    @noPosargs
    def func_declare_dependency(self, node, args, kwargs):
        """Implement declare_dependency(): build an InternalDependency from
        the given includes, libraries, sources, flags and variables.

        `version` defaults to the current project version and must be a
        string; `dependencies` entries must be dependency objects and
        `link_with` entries must be self-built targets.
        """
        version = kwargs.get('version', self.project_version)
        if not isinstance(version, str):
            raise InterpreterException('Version must be a string.')
        incs = self.extract_incdirs(kwargs)
        libs = extract_as_list(kwargs, 'link_with')
        libs_whole = extract_as_list(kwargs, 'link_whole')
        sources = extract_as_list(kwargs, 'sources')
        sources = listify(self.source_strings_to_files(sources))
        deps = extract_as_list(kwargs, 'dependencies')
        compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
        link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
        variables = self.extract_variables(kwargs, list_new=True)
        d_module_versions = extract_as_list(kwargs, 'd_module_versions')
        d_import_dirs = self.extract_incdirs(kwargs, 'd_import_dirs')
        final_deps = []
        for d in deps:
            if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
                raise InterpreterException('Dependencies must be external deps')
            final_deps.append(d)
        for l in libs:
            if isinstance(l, dependencies.Dependency):
                raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
        dep = dependencies.InternalDependency(version, incs, compile_args,
                                              link_args, libs, libs_whole, sources, final_deps,
                                              variables, d_module_versions, d_import_dirs)
        return dep
@typed_pos_args('assert', bool, optargs=[str])
@noKwargs
def func_assert(self, node: mparser.FunctionNode, args: T.Tuple[bool, T.Optional[str]],
kwargs: 'TYPE_kwargs') -> None:
value, message = args
if message is None:
FeatureNew.single_use('assert function without message argument', '0.53.0', self.subproject, location=node)
if not value:
if message is None:
from ..ast import AstPrinter
printer = AstPrinter()
node.args.arguments[0].accept(printer)
message = printer.result
raise InterpreterException('Assert failed: ' + message)
def validate_arguments(self, args, argcount, arg_types):
if argcount is not None:
if argcount != len(args):
raise InvalidArguments(f'Expected {argcount} arguments, got {len(args)}.')
for actual, wanted in zip(args, arg_types):
if wanted is not None:
if not isinstance(actual, wanted):
raise InvalidArguments('Incorrect argument type.')
    # Executables aren't actually accepted, but we allow them here to allow for
    # better error messages when overridden
    @typed_pos_args(
        'run_command',
        (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str),
        varargs=(build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str))
    @typed_kwargs(
        'run_command',
        KwargInfo('check', (bool, NoneType), since='0.47.0'),
        KwargInfo('capture', bool, default=True, since='0.47.0'),
        ENV_KW.evolve(since='0.50.0'),
    )
    def func_run_command(self, node: mparser.BaseNode,
                         args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
                                       T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
                         kwargs: 'kwargs.RunCommand') -> RunProcess:
        """Implement run_command(): run a program at configure time.

        Thin wrapper; all logic lives in run_command_impl() so that other
        callers (e.g. vcs_tag) can reuse it with in_builddir=True.
        """
        return self.run_command_impl(node, args, kwargs)
    def run_command_impl(self,
                         node: mparser.BaseNode,
                         args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
                                       T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
                         kwargs: 'kwargs.RunCommand',
                         in_builddir: bool = False) -> RunProcess:
        """Resolve the command and its arguments, then run it synchronously.

        Compiled Executable targets cannot run at configure time, so both a
        program overridden with one and an Executable argument are rejected.
        Every resolved file is registered as a build-definition file so a
        change to it triggers reconfiguration.
        """
        cmd, cargs = args
        capture = kwargs['capture']
        env = kwargs['env']
        srcdir = self.environment.get_source_dir()
        builddir = self.environment.get_build_dir()
        check = kwargs['check']
        if check is None:
            # check defaults to False today but will flip to True; warn once.
            mlog.warning(implicit_check_false_warning, once=True)
            check = False

        overridden_msg = ('Program {!r} was overridden with the compiled '
                          'executable {!r} and therefore cannot be used during '
                          'configuration')
        expanded_args: T.List[str] = []
        if isinstance(cmd, build.Executable):
            progname = node.args.arguments[0].value
            raise InterpreterException(overridden_msg.format(progname, cmd.description()))
        if isinstance(cmd, ExternalProgram):
            if not cmd.found():
                raise InterpreterException(f'command {cmd.get_name()!r} not found or not executable')
        elif isinstance(cmd, compilers.Compiler):
            # A compiler runs as its first exelist entry; the rest become
            # leading arguments.
            exelist = cmd.get_exelist()
            cmd = exelist[0]
            prog = ExternalProgram(cmd, silent=True)
            if not prog.found():
                raise InterpreterException(f'Program {cmd!r} not found or not executable')
            cmd = prog
            expanded_args = exelist[1:]
        else:
            if isinstance(cmd, mesonlib.File):
                cmd = cmd.absolute_path(srcdir, builddir)
            # Prefer scripts in the current source directory
            search_dir = os.path.join(srcdir, self.subdir)
            prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
            if not prog.found():
                raise InterpreterException(f'Program or command {cmd!r} not found or not executable')
            cmd = prog
        for a in cargs:
            if isinstance(a, str):
                expanded_args.append(a)
            elif isinstance(a, mesonlib.File):
                expanded_args.append(a.absolute_path(srcdir, builddir))
            elif isinstance(a, ExternalProgram):
                expanded_args.append(a.get_path())
            elif isinstance(a, compilers.Compiler):
                FeatureNew.single_use('Compiler object as a variadic argument to `run_command`', '0.61.0', self.subproject, location=node)
                prog = ExternalProgram(a.exelist[0], silent=True)
                if not prog.found():
                    raise InterpreterException(f'Program {cmd!r} not found or not executable')
                expanded_args.append(prog.get_path())
            else:
                raise InterpreterException(overridden_msg.format(a.name, cmd.description()))

        # If any file that was used as an argument to the command
        # changes, we must re-run the configuration step.
        self.add_build_def_file(cmd.get_path())
        for a in expanded_args:
            if not os.path.isabs(a):
                a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
            self.add_build_def_file(a)

        return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
                          self.environment.get_build_command() + ['introspect'],
                          in_builddir=in_builddir, check=check, capture=capture)
    def func_gettext(self, nodes, args, kwargs):
        """Removed builtin; kept only to direct users to i18n.gettext()."""
        raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')
    def func_option(self, nodes, args, kwargs):
        """option() is only legal inside meson_options.txt; reject it here."""
        raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
    @typed_pos_args('subproject', str)
    @typed_kwargs(
        'subproject',
        REQUIRED_KW,
        DEFAULT_OPTIONS.evolve(since='0.38.0'),
        KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True),
    )
    def func_subproject(self, nodes: mparser.BaseNode, args: T.Tuple[str], kwargs_: kwargs.Subproject) -> SubprojectHolder:
        """Implement subproject(): configure a meson subproject.

        Translates the user-facing kwargs into the DoSubproject shape shared
        with the CMake path and delegates to do_subproject().
        """
        kw: kwargs.DoSubproject = {
            'required': kwargs_['required'],
            'default_options': kwargs_['default_options'],
            'version': kwargs_['version'],
            'options': None,
            'cmake_options': [],
        }
        return self.do_subproject(args[0], 'meson', kw)
def disabled_subproject(self, subp_name: str, disabled_feature: T.Optional[str] = None,
exception: T.Optional[Exception] = None) -> SubprojectHolder:
sub = SubprojectHolder(NullSubprojectInterpreter(), os.path.join(self.subproject_dir, subp_name),
disabled_feature=disabled_feature, exception=exception)
self.subprojects[subp_name] = sub
self.coredata.initialized_subprojects.add(subp_name)
return sub
def do_subproject(self, subp_name: str, method: Literal['meson', 'cmake'], kwargs: kwargs.DoSubproject) -> SubprojectHolder:
    """Resolve, validate and configure the subproject *subp_name*.

    Handles the feature-disabled short-circuit, name validation,
    recursion/duplicate detection, wrap resolution, and finally
    dispatches to the meson- or cmake-specific helper.  Raises
    InterpreterException (or re-raises the underlying error) when a
    required subproject cannot be configured; otherwise a failed
    subproject is recorded as disabled.
    """
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Subproject', mlog.bold(subp_name), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.disabled_subproject(subp_name, disabled_feature=feature)

    default_options = coredata.create_options_dict(kwargs['default_options'], subp_name)

    # Reject names that could escape the subprojects directory or
    # otherwise produce surprising paths.
    if subp_name == '':
        raise InterpreterException('Subproject name must not be empty.')
    if subp_name[0] == '.':
        raise InterpreterException('Subproject name must not start with a period.')
    if '..' in subp_name:
        raise InterpreterException('Subproject name must not contain a ".." path segment.')
    if os.path.isabs(subp_name):
        raise InterpreterException('Subproject name must not be an absolute path.')
    if has_path_sep(subp_name):
        mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
                     location=self.current_node)
    if subp_name in self.subproject_stack:
        fullstack = self.subproject_stack + [subp_name]
        incpath = ' => '.join(fullstack)
        raise InvalidCode(f'Recursive include of subprojects: {incpath}.')
    if subp_name in self.subprojects:
        # Already configured earlier in this run: only re-check the
        # required/version constraints against the existing holder.
        subproject = self.subprojects[subp_name]
        if required and not subproject.found():
            raise InterpreterException(f'Subproject "{subproject.subdir}" required but not found.')
        if kwargs['version']:
            pv = self.build.subprojects[subp_name]
            wanted = kwargs['version']
            if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
                raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
        return subproject

    r = self.environment.wrap_resolver
    try:
        subdir = r.resolve(subp_name, method)
    except wrap.WrapException as e:
        if not required:
            mlog.log(e)
            mlog.log('Subproject ', mlog.bold(subp_name), 'is buildable:', mlog.red('NO'), '(disabling)')
            return self.disabled_subproject(subp_name, exception=e)
        raise e

    subdir_abs = os.path.join(self.environment.get_source_dir(), subdir)
    os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
    # Subprojects must not see global args added later by the parent.
    self.global_args_frozen = True

    stack = ':'.join(self.subproject_stack + [subp_name])
    m = ['\nExecuting subproject', mlog.bold(stack)]
    if method != 'meson':
        m += ['method', mlog.bold(method)]
    mlog.log(*m, '\n', nested=False)

    try:
        if method == 'meson':
            return self._do_subproject_meson(subp_name, subdir, default_options, kwargs)
        elif method == 'cmake':
            return self._do_subproject_cmake(subp_name, subdir, subdir_abs, default_options, kwargs)
        else:
            raise mesonlib.MesonBugException(f'The method {method} is invalid for the subproject {subp_name}')
    # Invalid code is always an error
    except InvalidCode:
        raise
    except Exception as e:
        if not required:
            with mlog.nested(subp_name):
                # Suppress the 'ERROR:' prefix because this exception is not
                # fatal and VS CI treat any logs with "ERROR:" as fatal.
                mlog.exception(e, prefix=mlog.yellow('Exception:'))
            mlog.log('\nSubproject', mlog.bold(subdir), 'is buildable:', mlog.red('NO'), '(disabling)')
            return self.disabled_subproject(subp_name, exception=e)
        raise e
def _do_subproject_meson(self, subp_name: str, subdir: str,
                         default_options: T.Dict[OptionKey, str],
                         kwargs: kwargs.DoSubproject,
                         ast: T.Optional[mparser.CodeBlockNode] = None,
                         build_def_files: T.Optional[T.List[str]] = None,
                         is_translated: bool = False) -> SubprojectHolder:
    """Configure a meson subproject by running a nested Interpreter.

    *ast* and *build_def_files* are supplied when the subproject was
    translated from another build system (see _do_subproject_cmake).
    Returns the registered SubprojectHolder for *subp_name*.
    """
    with mlog.nested(subp_name):
        new_build = self.build.copy()
        subi = Interpreter(new_build, self.backend, subp_name, subdir, self.subproject_dir,
                           default_options, ast=ast, is_translated=is_translated,
                           user_defined_options=self.user_defined_options)
        # Those lists are shared by all interpreters. That means that
        # even if the subproject fails, any modification that the subproject
        # made to those lists will affect the parent project.
        subi.subprojects = self.subprojects
        subi.modules = self.modules
        subi.holder_map = self.holder_map
        subi.bound_holder_map = self.bound_holder_map
        subi.summary = self.summary

        subi.subproject_stack = self.subproject_stack + [subp_name]
        current_active = self.active_projectname
        # Count warnings emitted by the subproject separately; the global
        # counter is saved and restored around the nested run.
        current_warnings_counter = mlog.log_warnings_counter
        mlog.log_warnings_counter = 0
        subi.run()
        subi_warnings = mlog.log_warnings_counter
        mlog.log_warnings_counter = current_warnings_counter

        mlog.log('Subproject', mlog.bold(subp_name), 'finished.')

    mlog.log()

    if kwargs['version']:
        pv = subi.project_version
        wanted = kwargs['version']
        if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
            raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
    self.active_projectname = current_active
    self.subprojects.update(subi.subprojects)
    self.subprojects[subp_name] = SubprojectHolder(subi, subdir, warnings=subi_warnings)
    # Duplicates are possible when subproject uses files from project root
    if build_def_files:
        self.build_def_files.update(build_def_files)
    # We always need the subi.build_def_files, to propagate sub-sub-projects
    self.build_def_files.update(subi.build_def_files)
    self.build.merge(subi.build)
    self.build.subprojects[subp_name] = subi.project_version
    self.coredata.initialized_subprojects.add(subp_name)
    return self.subprojects[subp_name]
def _do_subproject_cmake(self, subp_name: str, subdir: str, subdir_abs: str,
                         default_options: T.Dict[OptionKey, str],
                         kwargs: kwargs.DoSubproject) -> SubprojectHolder:
    """Configure a CMake subproject by translating it to a meson AST.

    The CMakeInterpreter analyses the CMake project, a meson AST is
    generated from it, written out for debugging, and then executed
    through the regular _do_subproject_meson path.
    """
    with mlog.nested(subp_name):
        new_build = self.build.copy()
        prefix = self.coredata.options[OptionKey('prefix')].value

        from ..modules.cmake import CMakeSubprojectOptions
        options = kwargs['options'] or CMakeSubprojectOptions()
        cmake_options = kwargs['cmake_options'] + options.cmake_options
        cm_int = CMakeInterpreter(new_build, Path(subdir), Path(subdir_abs), Path(prefix), new_build.environment, self.backend)
        cm_int.initialise(cmake_options)
        cm_int.analyse()

        # Generate a meson ast and execute it with the normal do_subproject_meson
        ast = cm_int.pretend_to_be_meson(options.target_options)

        mlog.log()
        with mlog.nested('cmake-ast'):
            mlog.log('Processing generated meson AST')

            # Debug print the generated meson file
            from ..ast import AstIndentationGenerator, AstPrinter
            printer = AstPrinter()
            ast.accept(AstIndentationGenerator())
            ast.accept(printer)
            printer.post_process()
            meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
            with open(meson_filename, "w", encoding='utf-8') as f:
                f.write(printer.result)

            mlog.log('Build file:', meson_filename)
            mlog.cmd_ci_include(meson_filename)
            mlog.log()

        result = self._do_subproject_meson(subp_name, subdir, default_options, kwargs, ast, [str(f) for f in cm_int.bs_files], is_translated=True)
        result.cm_interpreter = cm_int

    mlog.log()
    return result
def get_option_internal(self, optname: str) -> coredata.UserOption:
    """Look up the UserOption object for *optname* in this (sub)project.

    Non-project (builtin/compiler/base) options fall back to the
    top-level value when yielding or unset in the subproject.  Project
    options honour the 'yielding' flag only when the parent declares an
    option of the same type.  Raises InterpreterException for unknown
    options.
    """
    key = OptionKey.from_string(optname).evolve(subproject=self.subproject)

    if not key.is_project():
        for opts in [self.coredata.options, compilers.base_options]:
            v = opts.get(key)
            # Fall back to the top-level (root) value when the
            # subproject-scoped option is missing or yields.
            if v is None or v.yielding:
                v = opts.get(key.as_root())
            if v is not None:
                assert isinstance(v, coredata.UserOption), 'for mypy'
                return v

    try:
        opt = self.coredata.options[key]
        if opt.yielding and key.subproject and key.as_root() in self.coredata.options:
            popt = self.coredata.options[key.as_root()]
            if type(opt) is type(popt):
                opt = popt
            else:
                # Get class name, then option type as a string
                opt_type = opt.__class__.__name__[4:][:-6].lower()
                popt_type = popt.__class__.__name__[4:][:-6].lower()
                # This is not a hard error to avoid dependency hell, the workaround
                # when this happens is to simply set the subproject's option directly.
                mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
                             'to parent option of type {3!r}, ignoring parent value. '
                             'Use -D{2}:{0}=value to set the value for this option manually'
                             '.'.format(optname, opt_type, self.subproject, popt_type),
                             location=self.current_node)
        return opt
    except KeyError:
        pass

    raise InterpreterException(f'Tried to access unknown option {optname!r}.')
@typed_pos_args('get_option', str)
@noKwargs
def func_get_option(self, nodes: mparser.BaseNode, args: T.Tuple[str],
                    kwargs: 'TYPE_kwargs') -> T.Union[coredata.UserOption, 'TYPE_var']:
    """get_option(): return the value (or feature object) of a project option."""
    optname = args[0]
    if ':' in optname:
        raise InterpreterException('Having a colon in option name is forbidden, '
                                   'projects are not allowed to directly access '
                                   'options of other subprojects.')
    opt = self.get_option_internal(optname)
    # Feature options are returned as objects so .enabled()/.disabled()
    # can be queried; everything else unwraps to a plain value.
    if isinstance(opt, coredata.UserFeatureOption):
        opt.name = optname
        return opt
    if isinstance(opt, coredata.UserOption):
        return opt.value
    return opt
@typed_pos_args('configuration_data', optargs=[dict])
@noKwargs
def func_configuration_data(self, node: mparser.BaseNode, args: T.Tuple[T.Optional[T.Dict[str, T.Any]]],
                            kwargs: 'TYPE_kwargs') -> build.ConfigurationData:
    """configuration_data(): create a ConfigurationData object.

    An optional dictionary of initial values may be given (since
    0.49.0); each value must be a str, int or bool.  Raises
    InvalidArguments for any other value type.
    """
    initial_values = args[0]
    if initial_values is not None:
        FeatureNew.single_use('configuration_data dictionary', '0.49.0', self.subproject, location=node)
        for k, v in initial_values.items():
            if not isinstance(v, (str, int, bool)):
                # Fixed message: removed a stray doubled quote after the key.
                raise InvalidArguments(
                    f'"configuration_data": initial value dictionary key "{k!r}" must be "str | int | bool", not "{v!r}"')
    return build.ConfigurationData(initial_values)
def set_backend(self) -> None:
    """Instantiate the build backend selected by the 'backend' option."""
    # Subproject interpreters inherit the backend chosen by the parent.
    if self.backend is not None:
        return
    backend_name = self.coredata.get_option(OptionKey('backend'))
    from ..backend import backends
    self.backend = backends.get_backend_from_name(backend_name, self.build, self)
    if self.backend is None:
        raise InterpreterException(f'Unknown backend "{backend_name}".')
    if self.backend.name != backend_name:
        if self.backend.name.startswith('vs'):
            mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
        self.coredata.set_option(OptionKey('backend'), self.backend.name)
    # Only init backend options on first invocation otherwise it would
    # override values previously set from command line.
    if self.environment.first_invocation:
        self.coredata.init_backend_options(backend_name)
    backend_opts = {k: v for k, v in self.environment.options.items() if k.is_backend()}
    self.coredata.set_options(backend_opts)
@typed_pos_args('project', str, varargs=str)
@typed_kwargs(
    'project',
    DEFAULT_OPTIONS,
    KwargInfo('meson_version', (str, NoneType)),
    KwargInfo(
        'version',
        (str, mesonlib.File, NoneType, list),
        default='undefined',
        validator=_project_version_validator,
        convertor=lambda x: x[0] if isinstance(x, list) else x,
    ),
    KwargInfo('license', ContainerTypeInfo(list, str), default=['unknown'], listify=True),
    KwargInfo('subproject_dir', str, default='subprojects'),
)
def func_project(self, node: mparser.FunctionNode, args: T.Tuple[str, T.List[str]], kwargs: 'kwargs.Project') -> None:
    """project(): declare the (sub)project and perform early setup.

    Validates the meson version, loads the option file, initializes
    builtin and default options, resolves the project version, sets up
    the wrap resolver, adds the requested languages, and selects the
    build backend.  Raises InvalidCode on a second project() call.
    """
    proj_name, proj_langs = args
    if ':' in proj_name:
        raise InvalidArguments(f"Project name {proj_name!r} must not contain ':'")

    # This needs to be evaluated as early as possible, as meson uses this
    # for things like deprecation testing.
    if kwargs['meson_version']:
        cv = coredata.version
        pv = kwargs['meson_version']
        if not mesonlib.version_compare(cv, pv):
            raise InterpreterException(f'Meson version is {cv} but project requires {pv}')
        mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']

    if os.path.exists(self.option_file):
        oi = optinterpreter.OptionInterpreter(self.subproject)
        oi.process(self.option_file)
        self.coredata.update_project_options(oi.options)
        self.add_build_def_file(self.option_file)

    # Do not set default_options on reconfigure otherwise it would override
    # values previously set from command line. That means that changing
    # default_options in a project will trigger a reconfigure but won't
    # have any effect.
    self.project_default_options = coredata.create_options_dict(
        kwargs['default_options'], self.subproject)

    # If this is the first invocation we always need to initialize
    # builtins, if this is a subproject that is new in a re-invocation we
    # need to initialize builtins for that
    if self.environment.first_invocation or (self.subproject != '' and self.subproject not in self.coredata.initialized_subprojects):
        default_options = self.project_default_options.copy()
        default_options.update(self.default_project_options)
        self.coredata.init_builtins(self.subproject)
    else:
        default_options = {}
    self.coredata.set_default_options(default_options, self.subproject, self.environment)

    if not self.is_subproject():
        self.build.project_name = proj_name
    self.active_projectname = proj_name

    version = kwargs['version']
    if isinstance(version, mesonlib.File):
        FeatureNew.single_use('version from file', '0.57.0', self.subproject, location=node)
        self.add_build_def_file(version)
        ifname = version.absolute_path(self.environment.source_dir,
                                       self.environment.build_dir)
        try:
            ver_data = Path(ifname).read_text(encoding='utf-8').split('\n')
        except FileNotFoundError:
            raise InterpreterException('Version file not found.')
        # Tolerate a single trailing newline in the version file.
        if len(ver_data) == 2 and ver_data[1] == '':
            ver_data = ver_data[0:1]
        if len(ver_data) != 1:
            raise InterpreterException('Version file must contain exactly one line of text.')
        self.project_version = ver_data[0]
    else:
        self.project_version = version

    if self.build.project_version is None:
        self.build.project_version = self.project_version
    proj_license = kwargs['license']
    self.build.dep_manifest[proj_name] = build.DepManifest(self.project_version, proj_license)

    if self.subproject in self.build.projects:
        raise InvalidCode('Second call to project().')

    # spdirname is the subproject_dir for this project, relative to self.subdir.
    # self.subproject_dir is the subproject_dir for the main project, relative to top source dir.
    spdirname = kwargs['subproject_dir']
    if not isinstance(spdirname, str):
        raise InterpreterException('Subproject_dir must be a string')
    if os.path.isabs(spdirname):
        raise InterpreterException('Subproject_dir must not be an absolute path.')
    if spdirname.startswith('.'):
        raise InterpreterException('Subproject_dir must not begin with a period.')
    if '..' in spdirname:
        raise InterpreterException('Subproject_dir must not contain a ".." segment.')
    if not self.is_subproject():
        self.subproject_dir = spdirname
    self.build.subproject_dir = self.subproject_dir

    # Load wrap files from this (sub)project.
    wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
    if not self.is_subproject() or wrap_mode != WrapMode.nopromote:
        subdir = os.path.join(self.subdir, spdirname)
        r = wrap.Resolver(self.environment.get_source_dir(), subdir, self.subproject, wrap_mode)
        if self.is_subproject():
            self.environment.wrap_resolver.merge_wraps(r)
        else:
            self.environment.wrap_resolver = r

    self.build.projects[self.subproject] = proj_name
    mlog.log('Project name:', mlog.bold(proj_name))
    mlog.log('Project version:', mlog.bold(self.project_version))

    if not self.is_subproject():
        # We have to activate VS before adding languages and before calling
        # self.set_backend() otherwise it wouldn't be able to detect which
        # vs backend version we need. But after setting default_options in case
        # the project sets vs backend by default.
        backend = self.coredata.get_option(OptionKey('backend'))
        force_vsenv = self.user_defined_options.vsenv or backend.startswith('vs')
        if mesonlib.setup_vsenv(force_vsenv):
            self.build.need_vsenv = True

    self.add_languages(proj_langs, True, MachineChoice.HOST)
    self.add_languages(proj_langs, False, MachineChoice.BUILD)

    self.set_backend()
    if not self.is_subproject():
        self.check_stdlibs()
@typed_kwargs('add_languages', KwargInfo('native', (bool, NoneType), since='0.54.0'), REQUIRED_KW)
@typed_pos_args('add_languages', varargs=str)
def func_add_languages(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddLanguages') -> bool:
    """add_languages(): enable compilers for the given languages."""
    langs = args[0]
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    native = kwargs['native']

    if disabled:
        for lang in sorted(langs, key=compilers.sort_clink):
            mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
        return False

    if native is not None:
        return self.add_languages(langs, required, self.machine_from_native_kwarg(kwargs))

    # absent 'native' means 'both' for backwards compatibility
    tv = FeatureNew.get_target_version(self.subproject)
    if FeatureNew.check_version(tv, '0.54.0'):
        mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
                     location=node)
    success = self.add_languages(langs, False, MachineChoice.BUILD)
    success &= self.add_languages(langs, required, MachineChoice.HOST)
    return success
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
    """message(): print user-supplied values to the meson log."""
    if len(args) > 1:
        FeatureNew.single_use('message with more than one argument', '0.54.0', self.subproject, location=node)
    self.message_impl([stringifyUserArguments(arg) for arg in args])
def message_impl(self, args):
    """Write a bold 'Message:' line followed by *args* to the log."""
    prefix = mlog.bold('Message:')
    mlog.log(prefix, *args)
@noArgsFlattening
@FeatureNew('summary', '0.53.0')
@typed_pos_args('summary', (str, dict), optargs=[object])
@typed_kwargs(
    'summary',
    KwargInfo('section', str, default=''),
    KwargInfo('bool_yn', bool, default=False),
    KwargInfo('list_sep', (str, NoneType), since='0.54.0')
)
def func_summary(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, T.Dict[str, T.Any]], T.Optional[T.Any]],
                 kwargs: 'kwargs.Summary') -> None:
    """summary(): record key/value pairs for the configuration summary."""
    first, second = args
    if second is None:
        # One-argument form: a whole dictionary of entries.
        if not isinstance(first, dict):
            raise InterpreterException('Summary first argument must be dictionary.')
        values = first
    else:
        # Two-argument form: a single key/value pair.
        if not isinstance(first, str):
            raise InterpreterException('Summary first argument must be string.')
        values = {first: second}
    self.summary_impl(kwargs['section'], values, kwargs)
def summary_impl(self, section: str, values, kwargs: 'kwargs.Summary') -> None:
    """Append *values* to *section* of the current subproject's summary."""
    try:
        summary = self.summary[self.subproject]
    except KeyError:
        summary = self.summary[self.subproject] = Summary(self.active_projectname, self.project_version)
    summary.add_section(section, values, kwargs['bool_yn'], kwargs['list_sep'], self.subproject)
def _print_summary(self) -> None:
    """Print all collected summaries, ending with the main project's.

    Also synthesizes two automatic sections on the main project:
    one listing every subproject's status and one listing the
    user-defined options and machine files.
    """
    # Add automatic 'Subprojects' section in main project.
    all_subprojects = collections.OrderedDict()
    for name, subp in sorted(self.subprojects.items()):
        value = subp.found()
        if subp.disabled_feature:
            value = [value, f'Feature {subp.disabled_feature!r} disabled']
        elif subp.exception:
            value = [value, str(subp.exception)]
        elif subp.warnings > 0:
            value = [value, f'{subp.warnings} warnings']
        all_subprojects[name] = value
    if all_subprojects:
        self.summary_impl('Subprojects', all_subprojects,
                          {'bool_yn': True,
                           'list_sep': ' ',
                           })
    # Add automatic section with all user defined options
    if self.user_defined_options:
        values = collections.OrderedDict()
        if self.user_defined_options.cross_file:
            values['Cross files'] = self.user_defined_options.cross_file
        if self.user_defined_options.native_file:
            values['Native files'] = self.user_defined_options.native_file
        sorted_options = sorted(self.user_defined_options.cmd_line_options.items())
        values.update({str(k): v for k, v in sorted_options})
        if values:
            self.summary_impl('User defined options', values, {'bool_yn': False, 'list_sep': None})
    # Print all summaries, main project last.
    mlog.log('')  # newline
    # The main project's summary is stored under the empty-string key.
    main_summary = self.summary.pop('', None)
    for subp_name, summary in sorted(self.summary.items()):
        if self.subprojects[subp_name].found():
            summary.dump()
    if main_summary:
        main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
    """warning(): emit a user warning pointing at the calling location."""
    if len(args) > 1:
        FeatureNew.single_use('warning with more than one argument', '0.54.0', self.subproject, location=node)
    mlog.warning(*[stringifyUserArguments(arg) for arg in args], location=node)
@noArgsFlattening
@noKwargs
def func_error(self, node, args, kwargs):
    """error(): abort configuration with a user-provided message."""
    if len(args) > 1:
        FeatureNew.single_use('error with more than one argument', '0.58.0', self.subproject, location=node)
    message = ' '.join(stringifyUserArguments(arg) for arg in args)
    raise InterpreterException('Problem encountered: ' + message)
@noKwargs
@noPosargs
def func_exception(self, node, args, kwargs):
    """Debugging helper: deliberately raise a plain Exception."""
    raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
    """Enable compilers for *args* on *for_machine*; return overall success."""
    ok = self.add_languages_for(args, required, for_machine)
    if not self.coredata.is_cross_build():
        # Native builds share one set of options for both machines.
        self.coredata.copy_build_options_from_regular_ones()
    self._redetect_machines()
    return ok
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
    """Whether compiler sanity checks were disabled via the machine file."""
    skip = self.environment.properties.host.get('skip_sanity_check', False)
    if not isinstance(skip, bool):
        raise InterpreterException('Option skip_sanity_check must be a boolean.')
    # Skipping is only honoured for host-machine compilers in a cross build.
    if for_machine != MachineChoice.HOST and not skip:
        return False
    if not self.environment.is_cross_build() and not skip:
        return False
    return skip
def add_languages_for(self, args: T.List[str], required: bool, for_machine: MachineChoice) -> bool:
    """Detect and register a compiler for each requested language.

    Returns False (without raising) when a compiler is missing and
    *required* is False; otherwise detection errors propagate.
    """
    args = [a.lower() for a in args]
    langs = set(self.coredata.compilers[for_machine].keys())
    langs.update(args)
    # We'd really like to add cython's default language here, but it can't
    # actually be done because the cython compiler hasn't been initialized,
    # so we can't actually get the option yet. Because we can't know what
    # compiler to add by default, and we don't want to add unnecessary
    # compilers we don't add anything for cython here, and instead do it
    # When the first cython target using a particular language is used.
    if 'vala' in langs and 'c' not in langs:
        FeatureNew.single_use('Adding Vala language without C', '0.59.0', self.subproject, location=self.current_node)
        args.append('c')

    success = True
    for lang in sorted(args, key=compilers.sort_clink):
        clist = self.coredata.compilers[for_machine]
        machine_name = for_machine.get_lower_case_name()
        if lang in clist:
            # Compiler already detected in an earlier call; reuse it.
            comp = clist[lang]
        else:
            try:
                comp = compilers.detect_compiler_for(self.environment, lang, for_machine)
                if comp is None:
                    raise InvalidArguments(f'Tried to use unknown language "{lang}".')
                if self.should_skip_sanity_check(for_machine):
                    mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                else:
                    comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
            except Exception:
                if not required:
                    mlog.log('Compiler for language',
                             mlog.bold(lang), 'for the', machine_name,
                             'machine not found.')
                    success = False
                    continue
                else:
                    raise

        # Build-machine compiler details are only interesting in a cross
        # build; otherwise demote them to debug output.
        if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
            logger_fun = mlog.log
        else:
            logger_fun = mlog.debug
        logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                   mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
        if comp.linker is not None:
            logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
        self.build.ensure_static_linker(comp)

    return success
def program_from_file_for(self, for_machine: MachineChoice, prognames: T.List[mesonlib.FileOrString]
                          ) -> T.Optional[ExternalProgram]:
    """Return the first program name resolvable via the machine file, if any."""
    for candidate in prognames:
        if isinstance(candidate, mesonlib.File):
            # A File refers to something generated locally; the machine
            # file cannot provide it.
            continue
        if not isinstance(candidate, str):
            raise InterpreterException('Executable name must be a string')
        prog = ExternalProgram.from_bin_list(self.environment, for_machine, candidate)
        if prog.found():
            return prog
    return None
def program_from_system(self, args: T.List[mesonlib.FileOrString], search_dirs: T.List[str],
                        extra_info: T.List[mlog.TV_Loggable]) -> T.Optional[ExternalProgram]:
    """Search the system (and the caller's extra dirs) for a program.

    Results are deliberately not cached: find_program('foobar') can
    resolve differently when run from different source directories.
    """
    source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
    for exename in args:
        if isinstance(exename, mesonlib.File):
            root = (self.environment.get_build_dir() if exename.is_built
                    else self.environment.get_source_dir())
            search_dir = os.path.join(root, exename.subdir)
            exename = exename.fname
            extra_search_dirs = []
        elif isinstance(exename, str):
            search_dir = source_dir
            extra_search_dirs = search_dirs
        else:
            raise InvalidArguments(f'find_program only accepts strings and files, not {exename!r}')
        extprog = ExternalProgram(exename, search_dir=search_dir,
                                  extra_search_dirs=extra_search_dirs,
                                  silent=True)
        if extprog.found():
            extra_info.append(f"({' '.join(extprog.get_command())})")
            return extprog
    return None
def program_from_overrides(self, command_names: T.List[mesonlib.FileOrString],
                           extra_info: T.List['mlog.TV_Loggable']
                           ) -> T.Optional[T.Union[ExternalProgram, OverrideProgram, build.Executable]]:
    """Return the first matching meson.override_find_program() entry, if any."""
    for name in command_names:
        if isinstance(name, str) and name in self.build.find_overrides:
            extra_info.append(mlog.blue('(overridden)'))
            return self.build.find_overrides[name]
    return None
def store_name_lookups(self, command_names: T.List[mesonlib.FileOrString]) -> None:
    """Record successfully-found string names so later overrides are rejected."""
    self.build.searched_programs.update(
        name for name in command_names if isinstance(name, str))
def add_find_program_override(self, name: str, exe: T.Union[build.Executable, ExternalProgram, 'OverrideProgram']) -> None:
    """Make future find_program(name) calls return *exe* instead of searching."""
    overrides = self.build.find_overrides
    # Overriding after a successful lookup would make results inconsistent.
    if name in self.build.searched_programs:
        raise InterpreterException(f'Tried to override finding of executable "{name}" which has already been found.')
    if name in overrides:
        raise InterpreterException(f'Tried to override executable "{name}" which has already been overridden.')
    overrides[name] = exe
def notfound_program(self, args: T.List[mesonlib.FileOrString]) -> ExternalProgram:
    """Return a placeholder program object representing a failed lookup."""
    pieces = [a if isinstance(a, str)
              else a.absolute_path(self.environment.source_dir, self.environment.build_dir)
              for a in args]
    return NonExistingExternalProgram(' '.join(pieces))
# TODO update modules to always pass `for_machine`. It is bad-form to assume
# the host machine.
def find_program_impl(self, args: T.List[mesonlib.FileOrString],
                      for_machine: MachineChoice = MachineChoice.HOST,
                      required: bool = True, silent: bool = True,
                      wanted: T.Union[str, T.List[str]] = '',
                      search_dirs: T.Optional[T.List[str]] = None,
                      version_func: T.Optional[T.Callable[[T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']], str]] = None
                      ) -> T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']:
    """Core find_program logic: look up, version-check, and log a program.

    Returns a not-found placeholder (or raises, when *required*) if the
    program is missing or its version does not satisfy *wanted*.
    """
    args = mesonlib.listify(args)

    extra_info: T.List[mlog.TV_Loggable] = []
    progobj = self.program_lookup(args, for_machine, required, search_dirs, extra_info)
    if progobj is None:
        progobj = self.notfound_program(args)

    if isinstance(progobj, ExternalProgram) and not progobj.found():
        if not silent:
            mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'))
        if required:
            m = 'Program {!r} not found or not executable'
            raise InterpreterException(m.format(progobj.get_name()))
        return progobj

    if wanted:
        # Determine the program's version: explicit callback, project
        # version for in-build executables, or `--version` output.
        if version_func:
            version = version_func(progobj)
        elif isinstance(progobj, build.Executable):
            if progobj.subproject:
                interp = self.subprojects[progobj.subproject].held_object
            else:
                interp = self
            assert isinstance(interp, Interpreter)
            version = interp.project_version
        else:
            version = progobj.get_version(self)
        is_found, not_found, _ = mesonlib.version_compare_many(version, wanted)
        if not is_found:
            mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(version), 'but need:',
                     mlog.bold(', '.join([f"'{e}'" for e in not_found])), *extra_info)
            if required:
                m = 'Invalid version of program, need {!r} {!r} found {!r}.'
                raise InterpreterException(m.format(progobj.name, not_found, version))
            return self.notfound_program(args)
        extra_info.insert(0, mlog.normal_cyan(version))

    # Only store successful lookups
    self.store_name_lookups(args)
    if not silent:
        mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.green('YES'), *extra_info)
    if isinstance(progobj, build.Executable):
        progobj.was_returned_by_find_program = True
    return progobj
def program_lookup(self, args: T.List[mesonlib.FileOrString], for_machine: MachineChoice,
                   required: bool, search_dirs: T.List[str], extra_info: T.List[mlog.TV_Loggable]
                   ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
    """Locate a program, honouring overrides, wrap fallbacks and wrap_mode.

    Search order: explicit overrides, then (for forcefallback) the wrap
    provider, then the machine file, then the system; a required lookup
    may finally trigger the wrap fallback subproject.
    """
    progobj = self.program_from_overrides(args, extra_info)
    if progobj:
        return progobj

    fallback = None
    wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
    if wrap_mode != WrapMode.nofallback and self.environment.wrap_resolver:
        fallback = self.environment.wrap_resolver.find_program_provider(args)
    if fallback and wrap_mode == WrapMode.forcefallback:
        return self.find_program_fallback(fallback, args, required, extra_info)

    progobj = self.program_from_file_for(for_machine, args)
    if progobj is None:
        progobj = self.program_from_system(args, search_dirs, extra_info)
    if progobj is None and args[0].endswith('python3'):
        # Special-case python3: fall back to the interpreter running meson.
        prog = ExternalProgram('python3', mesonlib.python_command, silent=True)
        progobj = prog if prog.found() else None
    if progobj is None and fallback and required:
        progobj = self.find_program_fallback(fallback, args, required, extra_info)

    return progobj
def find_program_fallback(self, fallback: str, args: T.List[mesonlib.FileOrString],
                          required: bool, extra_info: T.List[mlog.TV_Loggable]
                          ) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
    """Configure the *fallback* subproject, then retry the override lookup."""
    mlog.log('Fallback to subproject', mlog.bold(fallback), 'which provides program',
             mlog.bold(' '.join(args)))
    sub_kwargs: kwargs.DoSubproject = {
        'required': required,
        'default_options': [],
        'version': [],
        'options': None,
        'cmake_options': [],
    }
    self.do_subproject(fallback, 'meson', sub_kwargs)
    return self.program_from_overrides(args, extra_info)
@typed_pos_args('find_program', varargs=(str, mesonlib.File), min_varargs=1)
@typed_kwargs(
    'find_program',
    DISABLER_KW.evolve(since='0.49.0'),
    NATIVE_KW,
    REQUIRED_KW,
    KwargInfo('dirs', ContainerTypeInfo(list, str), default=[], listify=True, since='0.53.0'),
    KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True, since='0.52.0'),
)
@disablerIfNotFound
def func_find_program(self, node: mparser.BaseNode, args: T.Tuple[T.List[mesonlib.FileOrString]],
                      kwargs: 'kwargs.FindProgram',
                      ) -> T.Union['build.Executable', ExternalProgram, 'OverrideProgram']:
    """find_program(): locate an external program, honouring overrides."""
    names = args[0]
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Program', mlog.bold(' '.join(names)), 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.notfound_program(names)

    return self.find_program_impl(names, kwargs['native'], required=required,
                                  silent=False, wanted=kwargs['version'],
                                  search_dirs=extract_search_dirs(kwargs))
def func_find_library(self, node, args, kwargs):
    """Removed built-in: find_library() now lives on compiler objects."""
    msg = ('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
           'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
           'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n')
    raise InvalidCode(msg)
# When adding kwargs, please check if they make sense in dependencies.get_dep_identifier()
@FeatureNewKwargs('dependency', '0.57.0', ['cmake_package_version'])
@FeatureNewKwargs('dependency', '0.56.0', ['allow_fallback'])
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_dependency_kwargs)
@typed_pos_args('dependency', varargs=str, min_varargs=1)
def func_dependency(self, node, args, kwargs):
    """dependency(): look up an external dependency, with optional fallback.

    Validates the fallback/not_found_message kwargs, performs the
    lookup through DependencyFallbacksHolder, emits the user's
    not-found message when appropriate, and converts the include type
    if requested.
    """
    # Replace '' by empty list of names
    names = [n for n in args[0] if n]
    if len(names) > 1:
        FeatureNew('dependency with more than one name', '0.60.0').use(self.subproject)
    allow_fallback = kwargs.get('allow_fallback')
    if allow_fallback is not None and not isinstance(allow_fallback, bool):
        raise InvalidArguments('"allow_fallback" argument must be boolean')
    fallback = kwargs.get('fallback')
    default_options = kwargs.get('default_options')
    df = DependencyFallbacksHolder(self, names, allow_fallback, default_options)
    df.set_fallback(fallback)
    not_found_message = kwargs.get('not_found_message', '')
    if not isinstance(not_found_message, str):
        raise InvalidArguments('The not_found_message must be a string.')
    try:
        d = df.lookup(kwargs)
    except Exception:
        if not_found_message:
            self.message_impl([not_found_message])
        raise
    assert isinstance(d, Dependency)
    if not d.found() and not_found_message:
        # Fix: this message was previously emitted twice by a duplicated call.
        self.message_impl([not_found_message])
    # Ensure the correct include type
    if 'include_type' in kwargs:
        wanted = kwargs['include_type']
        if not isinstance(wanted, str):
            raise InvalidArguments('The `include_type` kwarg must be a string')
        actual = d.get_include_type()
        if wanted != actual:
            mlog.debug(f'Current include type of {args[0]} is {actual}. Converting to requested {wanted}')
            d = d.generate_system_dependency(wanted)
    if d.feature_since is not None:
        version, extra_msg = d.feature_since
        FeatureNew.single_use(f'dep {d.name!r} custom lookup', version, self.subproject, extra_msg, node)
    for f in d.featurechecks:
        f.use(self.subproject, node)
    return d
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
    """Implement disabler(): return a fresh Disabler object."""
    result = Disabler()
    return result
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@FeatureNewKwargs('executable', '0.56.0', ['win_subsystem'])
@FeatureDeprecatedKwargs('executable', '0.56.0', ['gui_app'], extra_message="Use 'win_subsystem' instead.")
@permittedKwargs(build.known_exe_kwargs)
def func_executable(self, node, args, kwargs):
    """Implement executable() by delegating to the generic target builder."""
    target = self.build_target(node, args, kwargs, build.Executable)
    return target
@permittedKwargs(build.known_stlib_kwargs)
def func_static_lib(self, node, args, kwargs):
    """Implement static_library() by delegating to the generic target builder."""
    target = self.build_target(node, args, kwargs, build.StaticLibrary)
    return target
@permittedKwargs(build.known_shlib_kwargs)
def func_shared_lib(self, node, args, kwargs):
    """Implement shared_library(); the flag marks the holder as built via
    shared_library() rather than library()/both_libraries()."""
    target = self.build_target(node, args, kwargs, build.SharedLibrary)
    target.shared_library_only = True
    return target
@permittedKwargs(known_library_kwargs)
def func_both_lib(self, node, args, kwargs):
    """Implement both_libraries(): build static and shared variants."""
    result = self.build_both_libraries(node, args, kwargs)
    return result
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(build.known_shmod_kwargs)
def func_shared_module(self, node, args, kwargs):
    """Implement shared_module() by delegating to the generic target builder."""
    target = self.build_target(node, args, kwargs, build.SharedModule)
    return target
@permittedKwargs(known_library_kwargs)
def func_library(self, node, args, kwargs):
    """Implement library(): build whichever library kind the project's
    default_library option selects."""
    result = self.build_library(node, args, kwargs)
    return result
@permittedKwargs(build.known_jar_kwargs)
def func_jar(self, node, args, kwargs):
    """Implement jar() by delegating to the generic target builder."""
    target = self.build_target(node, args, kwargs, build.Jar)
    return target
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(known_build_target_kwargs)
def func_build_target(self, node, args, kwargs):
    """Implement build_target(): dispatch on the mandatory 'target_type'
    keyword to the matching target-specific builder."""
    if 'target_type' not in kwargs:
        raise InterpreterException('Missing target_type keyword argument')
    target_type = kwargs.pop('target_type')
    if target_type == 'shared_module':
        # shared_module via build_target() arrived later than the rest.
        FeatureNew('build_target(target_type: \'shared_module\')',
                   '0.51.0').use(self.subproject)
    # Most types map directly onto a target class.
    direct = {
        'executable': build.Executable,
        'shared_library': build.SharedLibrary,
        'shared_module': build.SharedModule,
        'static_library': build.StaticLibrary,
        'jar': build.Jar,
    }
    if target_type in direct:
        return self.build_target(node, args, kwargs, direct[target_type])
    if target_type == 'both_libraries':
        return self.build_both_libraries(node, args, kwargs)
    if target_type == 'library':
        return self.build_library(node, args, kwargs)
    raise InterpreterException('Unknown target_type.')
@noPosargs
@typed_kwargs(
    'vcs_tag',
    CT_INPUT_KW.evolve(required=True),
    CT_OUTPUT_KW,
    # Cannot use the COMMAND_KW because command is allowed to be empty
    KwargInfo(
        'command',
        ContainerTypeInfo(list, (str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex, ExternalProgram, mesonlib.File)),
        listify=True,
        default=[],
    ),
    KwargInfo('fallback', (str, NoneType)),
    KwargInfo('replace_string', str, default='@VCS_TAG@'),
)
def func_vcs_tag(self, node: mparser.BaseNode, args: T.List['TYPE_var'], kwargs: 'kwargs.VcsTag') -> build.CustomTarget:
    """Implement vcs_tag(): create a custom target that substitutes the
    current VCS revision into a template file via the vcstagger helper.

    If no explicit 'command' is given, auto-detects the VCS in the source
    dir; if none is found, a dummy command forces the fallback string.
    """
    if kwargs['fallback'] is None:
        # Omitting 'fallback' (defaulting to project_version) is itself
        # a versioned feature.
        FeatureNew.single_use('Optional fallback in vcs_tag', '0.41.0', self.subproject, location=node)
    fallback = kwargs['fallback'] or self.project_version
    replace_string = kwargs['replace_string']
    regex_selector = '(.*)' # default regex selector for custom command: use complete output
    vcs_cmd = kwargs['command']
    source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
    if vcs_cmd:
        if isinstance(vcs_cmd[0], mesonlib.File):
            FeatureNew.single_use('vcs_tag with file as the first argument', '0.62.0', self.subproject, location=node)
            maincmd = self.find_program_impl(vcs_cmd[0], required=False)
            if maincmd.found():
                vcs_cmd[0] = maincmd
    else:
        vcs = mesonlib.detect_vcs(source_dir)
        if vcs:
            mlog.log('Found {} repository at {}'.format(vcs['name'], vcs['wc_dir']))
            vcs_cmd = vcs['get_rev'].split()
            regex_selector = vcs['rev_regex']
        else:
            vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string
    # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
    self._validate_custom_target_outputs(len(kwargs['input']) > 1, kwargs['output'], "vcs_tag")
    tg = build.CustomTarget(
        kwargs['output'][0],
        self.subdir,
        self.subproject,
        self.environment.get_build_command() +
        ['--internal',
         'vcstagger',
         '@INPUT0@',
         '@OUTPUT0@',
         fallback,
         source_dir,
         replace_string,
         regex_selector] + vcs_cmd,
        self.source_strings_to_files(kwargs['input']),
        kwargs['output'],
        # Always stale so the tag is refreshed on every build.
        build_by_default=True,
        build_always_stale=True,
    )
    self.add_target(tg.name, tg)
    return tg
@FeatureNew('subdir_done', '0.46.0')
@noPosargs
@noKwargs
def func_subdir_done(self, node, args, kwargs):
    """Implement subdir_done(): abort evaluation of the current build file
    via a control-flow exception that func_subdir() catches."""
    raise SubdirDoneRequest()
@staticmethod
def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None:
    """Checks for additional invalid values in a custom_target output.

    This cannot be done with typed_kwargs because it requires the number of
    inputs.

    :raises InvalidArguments: if an output uses a per-input template while
        there is more than one input.
    """
    for out in outputs:
        if has_multi_in and ('@PLAINNAME@' in out or '@BASENAME@' in out):
            # Fixed typo in the user-facing message: "containe" -> "contain".
            raise InvalidArguments(f'{name}: output cannot contain "@PLAINNAME@" or "@BASENAME@" '
                                   'when there is more than one input (we can\'t know which to use)')
@typed_pos_args('custom_target', optargs=[str])
@typed_kwargs(
    'custom_target',
    COMMAND_KW,
    CT_BUILD_ALWAYS,
    CT_BUILD_ALWAYS_STALE,
    CT_BUILD_BY_DEFAULT,
    CT_INPUT_KW,
    CT_INSTALL_DIR_KW,
    CT_INSTALL_TAG_KW,
    CT_OUTPUT_KW,
    DEPENDS_KW,
    DEPEND_FILES_KW,
    DEPFILE_KW,
    ENV_KW.evolve(since='0.57.0'),
    INSTALL_KW,
    INSTALL_MODE_KW.evolve(since='0.47.0'),
    OVERRIDE_OPTIONS_KW,
    KwargInfo('feed', bool, default=False, since='0.59.0'),
    KwargInfo('capture', bool, default=False),
    KwargInfo('console', bool, default=False, since='0.48.0'),
)
def func_custom_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
                       kwargs: 'kwargs.CustomTarget') -> build.CustomTarget:
    """Implement custom_target(): validate the keyword combinations and
    create a CustomTarget.

    Raises InterpreterException/InvalidArguments on conflicting or
    incomplete keyword combinations.
    """
    if kwargs['depfile'] and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
        FeatureNew.single_use('substitutions in custom_target depfile', '0.47.0', self.subproject, location=node)
    # Don't mutate the kwargs
    build_by_default = kwargs['build_by_default']
    build_always_stale = kwargs['build_always_stale']
    # Remap the deprecated build_always to build_by_default and build_always_stale
    if kwargs['build_always'] is not None and kwargs['build_always_stale'] is not None:
        raise InterpreterException('CustomTarget: "build_always" and "build_always_stale" are mutually exclusive')
    if build_by_default is None and kwargs['install']:
        build_by_default = True
    elif kwargs['build_always'] is not None:
        if build_by_default is None:
            build_by_default = kwargs['build_always']
        # BUGFIX: this previously copied kwargs['build_by_default'];
        # the legacy 'build_always' flag must map onto build_always_stale.
        build_always_stale = kwargs['build_always']
    # These are nullable so that we can know whether they're explicitly
    # set or not. If they haven't been overwritten, set them to their true
    # default
    if build_by_default is None:
        build_by_default = False
    if build_always_stale is None:
        build_always_stale = False
    name = args[0]
    if name is None:
        # name will default to first output, but we cannot do that yet because
        # they could need substitutions (e.g. @BASENAME@) first. CustomTarget()
        # will take care of setting a proper default but name must be an empty
        # string in the meantime.
        FeatureNew.single_use('custom_target() with no name argument', '0.60.0', self.subproject, location=node)
        name = ''
    inputs = self.source_strings_to_files(kwargs['input'], strict=False)
    command = kwargs['command']
    if command and isinstance(command[0], str):
        command[0] = self.find_program_impl([command[0]])
    # Fixed doubled word ("used used") in the two messages below.
    if len(inputs) > 1 and kwargs['feed']:
        raise InvalidArguments('custom_target: "feed" keyword argument can only be used with a single input')
    if len(kwargs['output']) > 1 and kwargs['capture']:
        raise InvalidArguments('custom_target: "capture" keyword argument can only be used with a single output')
    if kwargs['capture'] and kwargs['console']:
        raise InvalidArguments('custom_target: "capture" and "console" keyword arguments are mutually exclusive')
    for c in command:
        if kwargs['capture'] and isinstance(c, str) and '@OUTPUT@' in c:
            raise InvalidArguments('custom_target: "capture" keyword argument cannot be used with "@OUTPUT@"')
        if kwargs['feed'] and isinstance(c, str) and '@INPUT@' in c:
            raise InvalidArguments('custom_target: "feed" keyword argument cannot be used with "@INPUT@"')
    if kwargs['install'] and not kwargs['install_dir']:
        raise InvalidArguments('custom_target: "install_dir" keyword argument must be set when "install" is true.')
    if len(kwargs['install_dir']) > 1:
        FeatureNew.single_use('multiple install_dir for custom_target', '0.40.0', self.subproject, location=node)
    if len(kwargs['install_tag']) not in {0, 1, len(kwargs['output'])}:
        raise InvalidArguments('custom_target: install_tag argument must have 0 or 1 outputs, '
                               'or the same number of elements as the output keyword argument. '
                               f'(there are {len(kwargs["install_tag"])} install_tags, '
                               f'and {len(kwargs["output"])} outputs)')
    self._validate_custom_target_outputs(len(inputs) > 1, kwargs['output'], "custom_target")
    tg = build.CustomTarget(
        name,
        self.subdir,
        self.subproject,
        command,
        inputs,
        kwargs['output'],
        build_always_stale=build_always_stale,
        build_by_default=build_by_default,
        capture=kwargs['capture'],
        console=kwargs['console'],
        depend_files=kwargs['depend_files'],
        depfile=kwargs['depfile'],
        extra_depends=kwargs['depends'],
        env=kwargs['env'],
        feed=kwargs['feed'],
        install=kwargs['install'],
        install_dir=kwargs['install_dir'],
        install_mode=kwargs['install_mode'],
        install_tag=kwargs['install_tag'],
        override_options=kwargs['override_options'],
        backend=self.backend)
    self.add_target(tg.name, tg)
    return tg
@typed_pos_args('run_target', str)
@typed_kwargs(
    'run_target',
    COMMAND_KW,
    DEPENDS_KW,
    ENV_KW.evolve(since='0.57.0'),
)
def func_run_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
                    kwargs: 'kwargs.RunTarget') -> build.RunTarget:
    """Implement run_target(): register a target that runs a command.

    Validates that any ExternalProgram in the command was actually found
    and resolves a leading string into a program.
    """
    command = kwargs['command'].copy()
    for item in listify(command):
        if isinstance(item, ExternalProgram) and not item.found():
            raise InterpreterException(f'Tried to use non-existing executable {item.name!r}')
    if isinstance(command[0], str):
        command[0] = self.find_program_impl([command[0]])
    target_name = args[0]
    target = build.RunTarget(target_name, command, kwargs['depends'], self.subdir, self.subproject, kwargs['env'])
    self.add_target(target_name, target)
    full_name = (self.subproject, target_name)
    assert full_name not in self.build.run_target_names
    self.build.run_target_names.add(full_name)
    return target
@FeatureNew('alias_target', '0.52.0')
@typed_pos_args('alias_target', str, varargs=build.Target, min_varargs=1)
@noKwargs
def func_alias_target(self, node: mparser.BaseNode, args: T.Tuple[str, T.List[build.Target]],
                      kwargs: 'TYPE_kwargs') -> build.AliasTarget:
    """Implement alias_target(): a named target that only depends on others."""
    alias_name, dependencies = args
    target = build.AliasTarget(alias_name, dependencies, self.subdir, self.subproject)
    self.add_target(alias_name, target)
    return target
@typed_pos_args('generator', (build.Executable, ExternalProgram))
@typed_kwargs(
    'generator',
    KwargInfo('arguments', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
    KwargInfo('output', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
    DEPFILE_KW,
    DEPENDS_KW,
    KwargInfo('capture', bool, default=False, since='0.43.0'),
)
def func_generator(self, node: mparser.FunctionNode,
                   args: T.Tuple[T.Union[build.Executable, ExternalProgram]],
                   kwargs: 'kwargs.FuncGenerator') -> build.Generator:
    """Implement generator(): validate output name templates, then register
    and return a Generator object."""
    outputs = kwargs['output']
    for template in outputs:
        # Each output name must be derived from the input file's name.
        if '@BASENAME@' not in template and '@PLAINNAME@' not in template:
            raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
        if has_path_sep(template):
            raise InvalidArguments('"output" must not contain a directory separator.')
    if len(outputs) > 1:
        # @OUTPUT@ is ambiguous when there are multiple outputs.
        for template in outputs:
            if '@OUTPUT@' in template:
                raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
    generator = build.Generator(args[0], **kwargs)
    self.generators.append(generator)
    return generator
@typed_pos_args('benchmark', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('benchmark', *TEST_KWARGS)
def func_benchmark(self, node: mparser.BaseNode,
                   args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
                   kwargs: 'kwargs.FuncBenchmark') -> None:
    """Implement benchmark(): a test registered in the benchmark suite."""
    self.add_test(node, args, kwargs, is_base_test=False)
@typed_pos_args('test', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('test', *TEST_KWARGS, KwargInfo('is_parallel', bool, default=True))
def func_test(self, node: mparser.BaseNode,
              args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
              kwargs: 'kwargs.FuncTest') -> None:
    """Implement test(): a test registered in the regular test suite."""
    self.add_test(node, args, kwargs, is_base_test=True)
def unpack_env_kwarg(self, kwargs: T.Union[build.EnvironmentVariables, T.Dict[str, 'TYPE_var'], T.List['TYPE_var'], str]) -> build.EnvironmentVariables:
    """Convert the raw 'env' keyword value into an EnvironmentVariables
    object, validating it with the shared ENV_KW machinery."""
    raw_env = kwargs.get('env')
    if raw_env is None:
        # Absent kwarg: empty environment.
        return build.EnvironmentVariables()
    problem = ENV_KW.validator(raw_env)
    if problem:
        raise InvalidArguments(f'"env": {problem}')
    return ENV_KW.convertor(raw_env)
def make_test(self, node: mparser.BaseNode,
              args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
              kwargs: 'kwargs.BaseTest') -> Test:
    """Build a Test object from test()/benchmark() arguments.

    Sanitizes the test name (':' is reserved as a suite separator),
    resolves File executables into programs, and prefixes every suite
    name with the (sanitized) project name.
    """
    name = args[0]
    if ':' in name:
        # ':' separates suite from test in CLI selectors, so it cannot
        # appear in a test name.
        mlog.deprecation(f'":" is not allowed in test name "{name}", it has been replaced with "_"',
                         location=node)
        name = name.replace(':', '_')
    exe = args[1]
    if isinstance(exe, ExternalProgram):
        if not exe.found():
            raise InvalidArguments('Tried to use not-found external program as test exe')
    elif isinstance(exe, mesonlib.File):
        # A plain file needs to be resolved into a runnable program.
        exe = self.find_program_impl([exe])
    env = self.unpack_env_kwarg(kwargs)
    if kwargs['timeout'] <= 0:
        # timeout <= 0 means "no timeout", a versioned feature.
        FeatureNew.single_use('test() timeout <= 0', '0.57.0', self.subproject, location=node)
    prj = self.subproject if self.is_subproject() else self.build.project_name
    suite: T.List[str] = []
    for s in kwargs['suite']:
        # Empty suite entries still produce the bare project prefix.
        if s:
            s = ':' + s
        suite.append(prj.replace(' ', '_').replace(':', '_') + s)
    # NOTE: Test() takes these positionally; order must match its signature.
    return Test(name,
                prj,
                suite,
                exe,
                kwargs['depends'],
                kwargs.get('is_parallel', False),
                kwargs['args'],
                env,
                kwargs['should_fail'],
                kwargs['timeout'],
                kwargs['workdir'],
                kwargs['protocol'],
                kwargs['priority'],
                kwargs['verbose'])
def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool):
    """Create a Test and append it to either the test or benchmark list."""
    test_obj = self.make_test(node, args, kwargs)
    if is_base_test:
        collection, verb = self.build.tests, 'Adding test'
    else:
        collection, verb = self.build.benchmarks, 'Adding benchmark'
    collection.append(test_obj)
    mlog.debug(verb, mlog.bold(test_obj.name, True))
@typed_pos_args('install_headers', varargs=(str, mesonlib.File))
@typed_kwargs(
    'install_headers',
    KwargInfo('install_dir', (str, NoneType)),
    KwargInfo('subdir', (str, NoneType)),
    INSTALL_MODE_KW.evolve(since='0.47.0'),
)
def func_install_headers(self, node: mparser.BaseNode,
                         args: T.Tuple[T.List['mesonlib.FileOrString']],
                         kwargs: 'kwargs.FuncInstallHeaders') -> build.Headers:
    """Implement install_headers(): register header files for installation.

    'subdir' and 'install_dir' are mutually exclusive.
    """
    header_files = self.source_strings_to_files(args[0])
    subdir = kwargs['subdir']
    if subdir is not None:
        if kwargs['install_dir'] is not None:
            raise InterpreterException('install_headers: cannot specify both "install_dir" and "subdir". Use only "install_dir".')
        if os.path.isabs(subdir):
            mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')
    headers = build.Headers(header_files, subdir, kwargs['install_dir'],
                            kwargs['install_mode'], self.subproject)
    self.build.headers.append(headers)
    return headers
@typed_pos_args('install_man', varargs=(str, mesonlib.File))
@typed_kwargs(
    'install_man',
    KwargInfo('install_dir', (str, NoneType)),
    KwargInfo('locale', (str, NoneType), since='0.58.0'),
    INSTALL_MODE_KW.evolve(since='0.47.0')
)
def func_install_man(self, node: mparser.BaseNode,
                     args: T.Tuple[T.List['mesonlib.FileOrString']],
                     kwargs: 'kwargs.FuncInstallMan') -> build.Man:
    """Implement install_man(): register man pages for installation.

    Each file must end in a numeric extension 1-9 (the man section).
    """
    # We just need to narrow this, because the input is limited to files and
    # Strings as inputs, so only Files will be returned
    man_files = self.source_strings_to_files(args[0])
    for man_file in man_files:
        try:
            section = int(man_file.rsplit('.', 1)[-1])
        except (IndexError, ValueError):
            # No usable numeric extension; force the range check to fail.
            section = 0
        if not 1 <= section <= 9:
            raise InvalidArguments('Man file must have a file extension of a number between 1 and 9')
    man = build.Man(man_files, kwargs['install_dir'], kwargs['install_mode'],
                    self.subproject, kwargs['locale'])
    self.build.man.append(man)
    return man
@FeatureNew('install_emptydir', '0.60.0')
@typed_kwargs(
    'install_emptydir',
    INSTALL_MODE_KW,
    KwargInfo('install_tag', (str, NoneType), since='0.62.0')
)
def func_install_emptydir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs) -> None:
    """Implement install_emptydir(): install an empty directory."""
    emptydir = build.EmptyDir(args[0], kwargs['install_mode'], self.subproject, kwargs['install_tag'])
    self.build.emptydir.append(emptydir)
    return emptydir
@FeatureNew('install_symlink', '0.61.0')
@typed_pos_args('symlink_name', str)
@typed_kwargs(
    'install_symlink',
    KwargInfo('pointing_to', str, required=True),
    KwargInfo('install_dir', str, required=True),
    INSTALL_TAG_KW,
)
def func_install_symlink(self, node: mparser.BaseNode,
                         args: T.Tuple[T.List[str]],
                         kwargs) -> build.SymlinkData:
    """Implement install_symlink(): install a symlink pointing at a path."""
    link_name = args[0]  # Validation while creating the SymlinkData object
    link_target = kwargs['pointing_to']
    link = build.SymlinkData(link_target, link_name, kwargs['install_dir'],
                             self.subproject, kwargs['install_tag'])
    self.build.symlinks.append(link)
    return link
@FeatureNew('structured_sources', '0.62.0')
@typed_pos_args('structured_sources', object, optargs=[dict])
@noKwargs
@noArgsFlattening
def func_structured_sources(
        self, node: mparser.BaseNode,
        args: T.Tuple[object, T.Optional[T.Dict[str, object]]],
        kwargs: 'TYPE_kwargs') -> build.StructuredSources:
    """Implement structured_sources(): sources organized into a directory
    mapping; the first positional argument maps to the root ('') key.

    Raises InvalidArguments on unsupported source types or an empty
    dictionary key.
    """
    # build.GeneratedList was listed twice here; deduplicated.
    valid_types = (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)
    sources: T.Dict[str, T.List[T.Union[mesonlib.File, 'build.GeneratedTypes']]] = collections.defaultdict(list)
    for arg in mesonlib.listify(args[0]):
        if not isinstance(arg, valid_types):
            raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
        if isinstance(arg, str):
            arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
        sources[''].append(arg)
    if args[1]:
        if '' in args[1]:
            raise InvalidArguments('structured_sources: keys to dictionary argument may not be an empty string.')
        for k, v in args[1].items():
            for arg in mesonlib.listify(v):
                if not isinstance(arg, valid_types):
                    raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
                if isinstance(arg, str):
                    arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
                sources[k].append(arg)
    return build.StructuredSources(sources)
@typed_pos_args('subdir', str)
@typed_kwargs(
    'subdir',
    KwargInfo(
        'if_found',
        ContainerTypeInfo(list, object),
        validator=lambda a: 'Objects must have a found() method' if not all(hasattr(x, 'found') for x in a) else None,
        since='0.44.0',
        default=[],
        listify=True,
    ),
)
def func_subdir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.Subdir') -> None:
    """Implement subdir(): descend into a subdirectory and evaluate its
    meson.build, restoring self.subdir afterwards.

    Skipped entirely if any 'if_found' object reports not-found.
    """
    mesonlib.check_direntry_issues(args)
    if '..' in args[0]:
        raise InvalidArguments('Subdir contains ..')
    if self.subdir == '' and args[0] == self.subproject_dir:
        raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
    if self.subdir == '' and args[0].startswith('meson-'):
        raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
    if args[0] == '':
        raise InvalidArguments("The argument given to subdir() is the empty string ''. This is prohibited.")
    for i in kwargs['if_found']:
        if not i.found():
            # Conditional subdir whose dependency is missing: silently skip.
            return
    prev_subdir = self.subdir
    subdir = os.path.join(prev_subdir, args[0])
    if os.path.isabs(subdir):
        raise InvalidArguments('Subdir argument must be a relative path.')
    absdir = os.path.join(self.environment.get_source_dir(), subdir)
    # Resolve symlinks so the same build file cannot be visited twice
    # through different paths.
    symlinkless_dir = os.path.realpath(absdir)
    build_file = os.path.join(symlinkless_dir, 'meson.build')
    if build_file in self.processed_buildfiles:
        raise InvalidArguments(f'Tried to enter directory "{subdir}", which has already been visited.')
    self.processed_buildfiles.add(build_file)
    self.subdir = subdir
    os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
    buildfilename = os.path.join(self.subdir, environment.build_filename)
    self.build_def_files.add(buildfilename)
    absname = os.path.join(self.environment.get_source_dir(), buildfilename)
    if not os.path.isfile(absname):
        # Restore state before erroring out.
        self.subdir = prev_subdir
        raise InterpreterException(f"Non-existent build file '{buildfilename!s}'")
    with open(absname, encoding='utf-8') as f:
        code = f.read()
    assert isinstance(code, str)
    try:
        codeblock = mparser.Parser(code, absname).parse()
    except mesonlib.MesonException as me:
        me.file = absname
        raise me
    try:
        self.evaluate_codeblock(codeblock)
    except SubdirDoneRequest:
        # subdir_done() aborts evaluation of this file early; not an error.
        pass
    self.subdir = prev_subdir
def _get_kwarg_install_mode(self, kwargs: T.Dict[str, T.Any]) -> T.Optional[FileMode]:
    """Extract and validate an 'install_mode' kwarg into a FileMode.

    Returns None when the kwarg is absent. Accepts up to three entries
    (permissions, user, group); a literal `false` entry means "leave
    that component unset".
    """
    if kwargs.get('install_mode', None) is None:
        return None
    if isinstance(kwargs['install_mode'], FileMode):
        # Already converted (e.g. by typed_kwargs).
        return kwargs['install_mode']
    install_mode: T.List[str] = []
    mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
    for m in mode:
        # We skip any arguments that are set to `false`
        if m is False:
            m = None
        install_mode.append(m)
    if len(install_mode) > 3:
        raise InvalidArguments('Keyword argument install_mode takes at '
                               'most 3 arguments.')
    if len(install_mode) > 0 and install_mode[0] is not None and \
            not isinstance(install_mode[0], str):
        raise InvalidArguments('Keyword argument install_mode requires the '
                               'permissions arg to be a string or false')
    return FileMode(*install_mode)
@typed_pos_args('install_data', varargs=(str, mesonlib.File))
@typed_kwargs(
    'install_data',
    KwargInfo('install_dir', (str, NoneType)),
    KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), listify=True, default=[]),
    KwargInfo('rename', ContainerTypeInfo(list, str), default=[], listify=True, since='0.46.0'),
    INSTALL_MODE_KW.evolve(since='0.38.0'),
    INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_data(self, node: mparser.BaseNode,
                      args: T.Tuple[T.List['mesonlib.FileOrString']],
                      kwargs: 'kwargs.FuncInstallData') -> build.Data:
    """Implement install_data(): register data files for installation.

    Positional args and the 'sources' kwarg are merged; 'rename' must
    match the merged source list one-to-one when given.
    """
    sources = self.source_strings_to_files(args[0] + kwargs['sources'])
    rename = kwargs['rename'] or None
    if rename:
        if len(rename) != len(sources):
            raise InvalidArguments(
                '"rename" and "sources" argument lists must be the same length if "rename" is given. '
                f'Rename has {len(rename)} elements and sources has {len(sources)}.')
    # Human-readable install dir name for introspection: relative dirs
    # are shown under {datadir}.
    install_dir_name = kwargs['install_dir']
    if install_dir_name:
        if not os.path.isabs(install_dir_name):
            install_dir_name = os.path.join('{datadir}', install_dir_name)
    else:
        install_dir_name = '{datadir}'
    return self.install_data_impl(sources, kwargs['install_dir'], kwargs['install_mode'],
                                  rename, kwargs['install_tag'], install_dir_name)
def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str,
                      install_mode: FileMode, rename: T.Optional[str],
                      tag: T.Optional[str],
                      install_dir_name: T.Optional[str] = None,
                      install_data_type: T.Optional[str] = None) -> build.Data:
    """Just the implementation with no validation."""
    # Fall back to the raw install_dir when no display name was supplied.
    display_name = install_dir_name or install_dir
    data = build.Data(sources, install_dir, display_name, install_mode,
                      self.subproject, rename, tag, install_data_type)
    self.build.data.append(data)
    return data
@typed_pos_args('install_subdir', str)
@typed_kwargs(
    'install_subdir',
    KwargInfo('install_dir', str, required=True),
    KwargInfo('strip_directory', bool, default=False),
    KwargInfo('exclude_files', ContainerTypeInfo(list, str),
              default=[], listify=True, since='0.42.0',
              validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
    KwargInfo('exclude_directories', ContainerTypeInfo(list, str),
              default=[], listify=True, since='0.42.0',
              validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
    INSTALL_MODE_KW.evolve(since='0.38.0'),
    INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_subdir(self, node: mparser.BaseNode, args: T.Tuple[str],
                        kwargs: 'kwargs.FuncInstallSubdir') -> build.InstallDir:
    """Implement install_subdir(): install a whole source subdirectory."""
    # (excluded files, excluded directories) as sets for fast lookup.
    exclusions = (set(kwargs['exclude_files']), set(kwargs['exclude_directories']))
    install_dir_obj = build.InstallDir(
        self.subdir,
        args[0],
        kwargs['install_dir'],
        kwargs['install_mode'],
        exclusions,
        kwargs['strip_directory'],
        self.subproject,
        install_tag=kwargs['install_tag'])
    self.build.install_dirs.append(install_dir_obj)
    return install_dir_obj
@noPosargs
@typed_kwargs(
    'configure_file',
    DEPFILE_KW.evolve(since='0.52.0'),
    # BUGFIX: version string contained a stray trailing comma ('0.47.0,').
    INSTALL_MODE_KW.evolve(since='0.47.0'),
    INSTALL_TAG_KW.evolve(since='0.60.0'),
    KwargInfo('capture', bool, default=False, since='0.41.0'),
    KwargInfo(
        'command',
        (ContainerTypeInfo(list, (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str), allow_empty=False), NoneType),
        listify=True,
    ),
    KwargInfo(
        'configuration',
        (ContainerTypeInfo(dict, (str, int, bool)), build.ConfigurationData, NoneType),
    ),
    KwargInfo('copy', bool, default=False, since='0.47.0'),
    KwargInfo('encoding', str, default='utf-8', since='0.47.0'),
    KwargInfo('format', str, default='meson', since='0.46.0',
              validator=in_set_validator({'meson', 'cmake', 'cmake@'})),
    KwargInfo(
        'input',
        ContainerTypeInfo(list, (mesonlib.File, str)),
        listify=True,
        default=[],
    ),
    # Cannot use shared implementation until None backwards compat is dropped
    KwargInfo('install', (bool, NoneType), since='0.50.0'),
    KwargInfo('install_dir', (str, bool), default='',
              validator=lambda x: 'must be `false` if boolean' if x is True else None),
    KwargInfo('output', str, required=True),
    KwargInfo('output_format', str, default='c', since='0.47.0',
              validator=in_set_validator({'c', 'nasm'})),
)
def func_configure_file(self, node: mparser.BaseNode, args: T.List[TYPE_var],
                        kwargs: kwargs.ConfigureFile):
    """Implement configure_file() in its three modes: 'configuration'
    (template substitution), 'command' (run a program), and 'copy'.

    Exactly one mode must be selected; the result may optionally be
    installed. Returns a File pointing at the generated output.
    """
    actions = sorted(x for x in {'configuration', 'command', 'copy'}
                     if kwargs[x] not in [None, False])
    num_actions = len(actions)
    if num_actions == 0:
        raise InterpreterException('Must specify an action with one of these '
                                   'keyword arguments: \'configuration\', '
                                   '\'command\', or \'copy\'.')
    elif num_actions == 2:
        raise InterpreterException('Must not specify both {!r} and {!r} '
                                   'keyword arguments since they are '
                                   'mutually exclusive.'.format(*actions))
    elif num_actions == 3:
        raise InterpreterException('Must specify one of {!r}, {!r}, and '
                                   '{!r} keyword arguments since they are '
                                   'mutually exclusive.'.format(*actions))
    if kwargs['capture'] and not kwargs['command']:
        raise InvalidArguments('configure_file: "capture" keyword requires "command" keyword.')
    fmt = kwargs['format']
    output_format = kwargs['output_format']
    depfile = kwargs['depfile']
    # Validate input
    inputs = self.source_strings_to_files(kwargs['input'])
    inputs_abs = []
    for f in inputs:
        if isinstance(f, mesonlib.File):
            inputs_abs.append(f.absolute_path(self.environment.source_dir,
                                              self.environment.build_dir))
            self.add_build_def_file(f)
        else:
            raise InterpreterException('Inputs can only be strings or file objects')
    # Validate output
    output = kwargs['output']
    if inputs_abs:
        # Substitute @PLAINNAME@/@BASENAME@ style templates in the output
        # (and depfile) using the first input.
        values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
        outputs = mesonlib.substitute_values([output], values)
        output = outputs[0]
        if depfile:
            depfile = mesonlib.substitute_values([depfile], values)[0]
    ofile_rpath = os.path.join(self.subdir, output)
    if ofile_rpath in self.configure_file_outputs:
        mesonbuildfile = os.path.join(self.subdir, 'meson.build')
        current_call = f"{mesonbuildfile}:{self.current_lineno}"
        first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
        mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
    else:
        self.configure_file_outputs[ofile_rpath] = self.current_lineno
    if os.path.dirname(output) != '':
        raise InterpreterException('Output file name must not contain a subdirectory.')
    (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
    ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
    # Perform the appropriate action
    if kwargs['configuration'] is not None:
        conf = kwargs['configuration']
        if isinstance(conf, dict):
            FeatureNew.single_use('configure_file.configuration dictionary', '0.49.0', self.subproject, location=node)
            for k, v in conf.items():
                if not isinstance(v, (str, int, bool)):
                    # BUGFIX: removed a stray doubled quote after {k!r}.
                    raise InvalidArguments(
                        f'"configuration_data": initial value dictionary key "{k!r}" must be "str | int | bool", not "{v!r}"')
            conf = build.ConfigurationData(conf)
        mlog.log('Configuring', mlog.bold(output), 'using configuration')
        if len(inputs) > 1:
            # BUGFIX: added missing word "be" in the message.
            raise InterpreterException('At most one input file can be given in configuration mode')
        if inputs:
            os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
            file_encoding = kwargs['encoding']
            missing_variables, confdata_useless = \
                mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf,
                                      fmt, file_encoding)
            if missing_variables:
                var_list = ", ".join(map(repr, sorted(missing_variables)))
                mlog.warning(
                    f"The variable(s) {var_list} in the input file '{inputs[0]}' are not "
                    "present in the given configuration data.", location=node)
            if confdata_useless:
                ifbase = os.path.basename(inputs_abs[0])
                tv = FeatureNew.get_target_version(self.subproject)
                if FeatureNew.check_version(tv, '0.47.0'):
                    mlog.warning('Got an empty configuration_data() object and found no '
                                 f'substitutions in the input file {ifbase!r}. If you want to '
                                 'copy a file to the build dir, use the \'copy:\' keyword '
                                 'argument added in 0.47.0', location=node)
        else:
            # No input: just dump the configuration data as a header.
            mesonlib.dump_conf_header(ofile_abs, conf, output_format)
        conf.used = True
    elif kwargs['command'] is not None:
        if len(inputs) > 1:
            FeatureNew.single_use('multiple inputs in configure_file()', '0.52.0', self.subproject, location=node)
        # We use absolute paths for input and output here because the cwd
        # that the command is run from is 'unspecified', so it could change.
        # Currently it's builddir/subdir for in_builddir else srcdir/subdir.
        values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
        if depfile:
            depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
            values['@DEPFILE@'] = depfile
        # Substitute @INPUT@, @OUTPUT@, etc here.
        _cmd = mesonlib.substitute_values(kwargs['command'], values)
        mlog.log('Configuring', mlog.bold(output), 'with command')
        cmd, *args = _cmd
        res = self.run_command_impl(node, (cmd, args),
                                    {'capture': True, 'check': True, 'env': build.EnvironmentVariables()},
                                    True)
        if kwargs['capture']:
            # Write captured stdout atomically via a temp file.
            dst_tmp = ofile_abs + '~'
            file_encoding = kwargs['encoding']
            with open(dst_tmp, 'w', encoding=file_encoding) as f:
                f.writelines(res.stdout)
            if inputs_abs:
                shutil.copymode(inputs_abs[0], dst_tmp)
            mesonlib.replace_if_different(ofile_abs, dst_tmp)
        if depfile:
            mlog.log('Reading depfile:', mlog.bold(depfile))
            with open(depfile, encoding='utf-8') as f:
                df = DepFile(f.readlines())
            deps = df.get_all_dependencies(ofile_fname)
            for dep in deps:
                self.add_build_def_file(dep)
    elif kwargs['copy']:
        if len(inputs_abs) != 1:
            raise InterpreterException('Exactly one input file must be given in copy mode')
        os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
        shutil.copy2(inputs_abs[0], ofile_abs)
    # Install file if requested, we check for the empty string
    # for backwards compatibility. That was the behaviour before
    # 0.45.0 so preserve it.
    idir = kwargs['install_dir']
    if idir is False:
        idir = ''
        FeatureDeprecated.single_use('configure_file install_dir: false', '0.50.0',
                                     self.subproject, 'Use the `install:` kwarg instead', location=node)
    install = kwargs['install'] if kwargs['install'] is not None else idir != ''
    if install:
        if not idir:
            raise InterpreterException(
                '"install_dir" must be specified when "install" in a configure_file is true')
        cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
        install_mode = kwargs['install_mode']
        install_tag = kwargs['install_tag']
        self.build.data.append(build.Data([cfile], idir, idir, install_mode, self.subproject,
                                          install_tag=install_tag, data_type='configure'))
    return mesonlib.File.from_built_file(self.subdir, output)
def extract_incdirs(self, kwargs, key: str = 'include_directories'):
prospectives = extract_as_list(kwargs, key)
result = []
for p in prospectives:
if isinstance(p, build.IncludeDirs):
result.append(p)
elif isinstance(p, str):
result.append(self.build_incdir_object([p]))
else:
raise InterpreterException('Include directory objects can only be created from strings or include directories.')
return result
@typed_pos_args('include_directories', varargs=str)
@typed_kwargs('include_directories', KwargInfo('is_system', bool, default=False))
def func_include_directories(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]],
kwargs: 'kwargs.FuncIncludeDirectories') -> build.IncludeDirs:
return self.build_incdir_object(args[0], kwargs['is_system'])
    def build_incdir_object(self, incdir_strings: T.List[str], is_system: bool = False) -> build.IncludeDirs:
        """Create a build.IncludeDirs object from relative directory strings.

        :param incdir_strings: directories relative to the current subdir
        :param is_system: mark the directories as system include dirs
        :raises InvalidArguments: for absolute source-dir paths, a non-bool
            is_system, or a directory that exists in neither the source nor
            the build tree
        """
        if not isinstance(is_system, bool):
            raise InvalidArguments('Is_system must be boolean.')
        src_root = self.environment.get_source_dir()
        build_root = self.environment.get_build_dir()
        absbase_src = os.path.join(src_root, self.subdir)
        absbase_build = os.path.join(build_root, self.subdir)

        for a in incdir_strings:
            # Absolute paths into the source tree are forbidden: they break
            # out-of-tree builds, so only relative paths are accepted.
            if a.startswith(src_root):
                raise InvalidArguments(textwrap.dedent('''\
                    Tried to form an absolute path to a source dir.
                    You should not do that but use relative paths instead.

                    To get include path to any directory relative to the current dir do

                    incdir = include_directories(dirname)

                    After this incdir will contain both the current source dir as well as the
                    corresponding build dir. It can then be used in any subdirectory and
                    Meson will take care of all the busywork to make paths work.

                    Dirname can even be '.' to mark the current directory. Though you should
                    remember that the current source and build directories are always
                    put in the include directories by default so you only need to do
                    include_directories('.') if you intend to use the result in a
                    different subdirectory.
                    '''))
            else:
                try:
                    self.validate_within_subproject(self.subdir, a)
                except InterpreterException:
                    # Sandbox violations are (for now) only a warning here;
                    # the message announces this will become a hard error.
                    mlog.warning('include_directories sandbox violation!', location=self.current_node)
                    print(textwrap.dedent(f'''\
                        The project is trying to access the directory {a!r} which belongs to a different
                        subproject. This is a problem as it hardcodes the relative paths of these two projects.
                        This makes it impossible to compile the project in any other directory layout and also
                        prevents the subproject from changing its own directory layout.

                        Instead of poking directly at the internals the subproject should be executed and
                        it should set a variable that the caller can then use. Something like:

                        # In subproject
                        some_dep = declare_dependency(include_directories: include_directories('include'))

                        # In subproject wrap file
                        [provide]
                        some = some_dep

                        # In parent project
                        some_dep = dependency('some')
                        executable(..., dependencies: [some_dep])

                        This warning will become a hard error in a future Meson release.
                        '''))
            absdir_src = os.path.join(absbase_src, a)
            absdir_build = os.path.join(absbase_build, a)
            # The directory only needs to exist on one side (source or build).
            if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
                raise InvalidArguments(f'Include dir {a} does not exist.')
        i = build.IncludeDirs(self.subdir, incdir_strings, is_system)
        return i
    @typed_pos_args('add_test_setup', str)
    @typed_kwargs(
        'add_test_setup',
        KwargInfo('exe_wrapper', ContainerTypeInfo(list, (str, ExternalProgram)), listify=True, default=[]),
        KwargInfo('gdb', bool, default=False),
        KwargInfo('timeout_multiplier', int, default=1),
        KwargInfo('exclude_suites', ContainerTypeInfo(list, str), listify=True, default=[], since='0.57.0'),
        KwargInfo('is_default', bool, default=False, since='0.49.0'),
        ENV_KW,
    )
    def func_add_test_setup(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.AddTestSetup') -> None:
        """add_test_setup(): register a named test setup on the build.

        The name may be qualified as 'project:name'; unqualified names are
        prefixed with the current (sub)project name. At most one setup in
        the whole build can be marked is_default.
        """
        setup_name = args[0]
        # An identifier, optionally preceded by another identifier and ':'.
        if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
            raise InterpreterException('Setup name may only contain alphanumeric characters.')
        if ":" not in setup_name:
            # Qualify with the subproject name, or the main project name at top level.
            setup_name = f'{(self.subproject if self.subproject else self.build.project_name)}:{setup_name}'
        exe_wrapper: T.List[str] = []
        for i in kwargs['exe_wrapper']:
            if isinstance(i, str):
                exe_wrapper.append(i)
            else:
                # ExternalProgram: flatten its command line into the wrapper list.
                if not i.found():
                    raise InterpreterException('Tried to use non-found executable.')
                exe_wrapper += i.get_command()
        timeout_multiplier = kwargs['timeout_multiplier']
        if timeout_multiplier <= 0:
            # Non-positive multipliers (meaning "no timeout") were first allowed in 0.57.0.
            FeatureNew('add_test_setup() timeout_multiplier <= 0', '0.57.0').use(self.subproject)

        if kwargs['is_default']:
            if self.build.test_setup_default_name is not None:
                raise InterpreterException(f'{self.build.test_setup_default_name!r} is already set as default. '
                                           'is_default can be set to true only once')
            self.build.test_setup_default_name = setup_name
        self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, kwargs['gdb'], timeout_multiplier, kwargs['env'],
                                                             kwargs['exclude_suites'])
    @typed_pos_args('add_global_arguments', varargs=str)
    @typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
    def func_add_global_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """add_global_arguments(): compile args for every target of the chosen machine."""
        self._add_global_arguments(node, self.build.global_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_global_link_arguments', varargs=str)
@typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_global_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_global_arguments(node, self.build.global_link_args[kwargs['native']], args[0], kwargs)
    @typed_pos_args('add_project_arguments', varargs=str)
    @typed_kwargs('add_project_arguments', NATIVE_KW, LANGUAGE_KW)
    def func_add_project_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """add_project_arguments(): compile args scoped to the current (sub)project."""
        self._add_project_arguments(node, self.build.projects_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_project_link_arguments', varargs=str)
@typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_project_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_project_arguments(node, self.build.projects_link_args[kwargs['native']], args[0], kwargs)
def _warn_about_builtin_args(self, args: T.List[str]) -> None:
# -Wpedantic is deliberately not included, since some people want to use it but not use -Wextra
# see e.g.
# https://github.com/mesonbuild/meson/issues/3275#issuecomment-641354956
# https://github.com/mesonbuild/meson/issues/3742
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra')
optargs = ('-O0', '-O2', '-O3', '-Os', '-Oz', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning(f'Consider using the built-in warning_level option instead of using "{arg}".',
location=self.current_node)
elif arg in optargs:
mlog.warning(f'Consider using the built-in optimization level instead of using "{arg}".',
location=self.current_node)
elif arg == '-Werror':
mlog.warning(f'Consider using the built-in werror option instead of using "{arg}".',
location=self.current_node)
elif arg == '-g':
mlog.warning(f'Consider using the built-in debug option instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning(f'Consider using the built-in option for sanitizers instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning(f'Consider using the built-in option for language standard version instead of using "{arg}".',
location=self.current_node)
    def _add_global_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
                              args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """Shared implementation of add_global_(link_)arguments().

        Global arguments may not be added from a subproject, and not after
        either the global or the project argument lists have been frozen.
        """
        if self.is_subproject():
            msg = f'Function \'{node.func_name}\' cannot be used in subprojects because ' \
                  'there is no way to make that reliable.\nPlease only call ' \
                  'this if is_subproject() returns false. Alternatively, ' \
                  'define a variable that\ncontains your language-specific ' \
                  'arguments and add it to the appropriate *_args kwarg ' \
                  'in each target.'
            raise InvalidCode(msg)
        # Global args freeze as soon as *either* freeze flag is set.
        frozen = self.project_args_frozen or self.global_args_frozen
        self._add_arguments(node, argsdict, frozen, args, kwargs)
def _add_project_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.Dict[str, T.List[str]]],
args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
if self.subproject not in argsdict:
argsdict[self.subproject] = {}
self._add_arguments(node, argsdict[self.subproject],
self.project_args_frozen, args, kwargs)
    def _add_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
                       args_frozen: bool, args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
        """Append *args* to *argsdict* under every requested language.

        :raises InvalidCode: when a build target has already been declared
            (the argument lists are frozen at that point)
        """
        if args_frozen:
            msg = f'Tried to use \'{node.func_name}\' after a build target has been declared.\n' \
                  'This is not permitted. Please declare all arguments before your targets.'
            raise InvalidCode(msg)

        # Nudge users toward the built-in options for common raw flags.
        self._warn_about_builtin_args(args)

        for lang in kwargs['language']:
            # Build a new list instead of mutating in place, so previously
            # captured references to the old list are unaffected.
            argsdict[lang] = argsdict.get(lang, []) + args
    @noArgsFlattening
    @typed_pos_args('environment', optargs=[(str, list, dict)])
    @typed_kwargs('environment', ENV_METHOD_KW, ENV_SEPARATOR_KW.evolve(since='0.62.0'))
    def func_environment(self, node: mparser.FunctionNode, args: T.Tuple[T.Union[None, str, T.List['TYPE_var'], T.Dict[str, 'TYPE_var']]],
                         kwargs: 'TYPE_kwargs') -> build.EnvironmentVariables:
        """environment(): create an environment object, optionally seeded from
        a string, list, or dict positional argument (since 0.52.0)."""
        init = args[0]
        if init is not None:
            FeatureNew.single_use('environment positional arguments', '0.52.0', self.subproject, location=node)
            # Re-use the ENV_KW validator so errors match the keyword path.
            msg = ENV_KW.validator(init)
            if msg:
                raise InvalidArguments(f'"environment": {msg}')
            if isinstance(init, dict) and any(i for i in init.values() if isinstance(i, list)):
                FeatureNew.single_use('List of string in dictionary value', '0.62.0', self.subproject, location=node)
            return env_convertor_with_method(init, kwargs['method'], kwargs['separator'])
        # No initial value: start from an empty environment.
        return build.EnvironmentVariables()
@typed_pos_args('join_paths', varargs=str, min_varargs=1)
@noKwargs
def func_join_paths(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> str:
return os.path.join(*args[0]).replace('\\', '/')
    def run(self) -> None:
        """Interpret the project, then emit end-of-run reports.

        After the base interpreter finishes, log the target count, report
        FeatureNew/FeatureDeprecated usage, and (for the main project only)
        print extra warnings and the configuration summary.
        """
        super().run()
        mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
        FeatureNew.report(self.subproject)
        FeatureDeprecated.report(self.subproject)
        if not self.is_subproject():
            self.print_extra_warnings()
        # The summary is only printed once, from the top-level project.
        if self.subproject == '':
            self._print_summary()
def print_extra_warnings(self) -> None:
# TODO cross compilation
for c in self.coredata.compilers.host.values():
if c.get_id() == 'clang':
self.check_clang_asan_lundef()
break
    def check_clang_asan_lundef(self) -> None:
        """Warn about the known-bad combination of Clang + a sanitizer + b_lundef."""
        # Either option may be absent (e.g. not set up for this build); then
        # there is nothing to check.
        if OptionKey('b_lundef') not in self.coredata.options:
            return
        if OptionKey('b_sanitize') not in self.coredata.options:
            return
        if (self.coredata.options[OptionKey('b_lundef')].value and
                self.coredata.options[OptionKey('b_sanitize')].value != 'none'):
            mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.options[OptionKey('b_sanitize')].value),
                         location=self.current_node)
    # Check that the indicated file is within the same subproject
    # as we currently are. This is to stop people doing
    # nasty things like:
    #
    # f = files('../../master_src/file.c')
    #
    # Note that this is validated only when the file
    # object is generated. The result can be used in a different
    # subproject than it is defined in (due to e.g. a
    # declare_dependency).
    def validate_within_subproject(self, subdir, fname):
        """Raise InterpreterException if subdir/fname escapes the current (sub)project.

        Paths entirely outside the source tree are allowed (vendor SDKs);
        paths inside the tree must stay within the current project root and
        must not reach into a nested subproject.
        """
        srcdir = Path(self.environment.source_dir)
        # resolve() canonicalizes '..' segments and symlinks before checking.
        norm = Path(srcdir, subdir, fname).resolve()
        if os.path.isdir(norm):
            inputtype = 'directory'
        else:
            inputtype = 'file'
        if srcdir not in norm.parents:
            # Grabbing files outside the source tree is ok.
            # This is for vendor stuff like:
            #
            # /opt/vendorsdk/src/file_with_license_restrictions.c
            return
        project_root = Path(srcdir, self.root_subdir)
        subproject_dir = project_root / self.subproject_dir
        if norm == project_root:
            return
        if project_root not in norm.parents:
            raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} outside current (sub)project.')
        if subproject_dir == norm or subproject_dir in norm.parents:
            raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} from a nested subproject.')
    @T.overload
    def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = True) -> T.List['mesonlib.File']: ...

    @T.overload
    def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = False) -> T.List['mesonlib.FileOrString']: ... # noqa: F811

    @T.overload
    def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: ... # noqa: F811

    def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: # noqa: F811
        """Lower inputs to a list of Targets and Files, replacing any strings.

        :param sources: A raw (Meson DSL) list of inputs (targets, files, and
            strings)
        :param strict: when False, strings pointing into the build dir are
            passed through (with a deprecation warning) instead of converted
        :raises InterpreterException: if any of the inputs are of an invalid type
        :return: A list of Targets and Files
        """
        mesonlib.check_direntry_issues(sources)
        # A single non-list input is treated as a one-element list.
        if not isinstance(sources, list):
            sources = [sources]
        results: T.List['SourceOutputs'] = []
        for s in sources:
            if isinstance(s, str):
                if not strict and s.startswith(self.environment.get_build_dir()):
                    # Generated file referenced by absolute build-dir path:
                    # tolerated for now, slated to become a hard error.
                    results.append(s)
                    mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. '
                                 'This will become a hard error in the future.', location=self.current_node)
                else:
                    # Plain strings must resolve within the current subproject.
                    self.validate_within_subproject(self.subdir, s)
                    results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s))
            elif isinstance(s, mesonlib.File):
                results.append(s)
            elif isinstance(s, (build.GeneratedList, build.BuildTarget,
                                build.CustomTargetIndex, build.CustomTarget,
                                build.ExtractedObjects, build.StructuredSources)):
                # Target-like inputs pass through untouched.
                results.append(s)
            else:
                raise InterpreterException(f'Source item is {s!r} instead of '
                                           'string or File-type object')
        return results
    def add_target(self, name, tobj):
        """Validate a target name and register *tobj* in self.build.targets.

        :raises InterpreterException: empty or whitespace-only name
        :raises InvalidArguments: path segment to an existing directory,
            reserved 'meson-' prefix, or a forbidden target name
        :raises InvalidCode: duplicate target id
        """
        if name == '':
            raise InterpreterException('Target name must not be empty.')
        if name.strip() == '':
            raise InterpreterException('Target name must not consist only of whitespace.')
        if has_path_sep(name):
            # A path-shaped name is only an error if the directory actually
            # exists (then the target belongs in that dir's meson.build).
            pathseg = os.path.join(self.subdir, os.path.split(name)[0])
            if os.path.exists(os.path.join(self.source_root, pathseg)):
                raise InvalidArguments(textwrap.dedent(f'''\
                    Target "{name}" has a path segment pointing to directory "{pathseg}". This is an error.
                    To define a target that builds in that directory you must define it
                    in the meson.build file in that directory.
            '''))
        if name.startswith('meson-'):
            raise InvalidArguments("Target names starting with 'meson-' are reserved "
                                   "for Meson's internal use. Please rename.")
        if name in coredata.FORBIDDEN_TARGET_NAMES:
            raise InvalidArguments(f"Target name '{name}' is reserved for Meson's "
                                   "internal use. Please rename.")
        # To permit an executable and a shared library to have the
        # same name, such as "foo.exe" and "libfoo.a".
        idname = tobj.get_id()
        if idname in self.build.targets:
            raise InvalidCode(f'Tried to create target "{name}", but a target of that name already exists.')
        self.build.targets[idname] = tobj
        # Assign a stable GUID once per id (used e.g. by the VS backend).
        if idname not in self.coredata.target_guids:
            self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
    @FeatureNew('both_libraries', '0.46.0')
    def build_both_libraries(self, node, args, kwargs):
        """Build a shared and a static library from the same sources.

        When possible the static library reuses the shared library's object
        files instead of compiling everything twice.
        """
        shared_lib = self.build_target(node, args, kwargs, build.SharedLibrary)

        # Check if user forces non-PIC static library.
        pic = True
        key = OptionKey('b_staticpic')

        if 'pic' in kwargs:
            pic = kwargs['pic']
        elif key in self.environment.coredata.options:
            pic = self.environment.coredata.options[key].value

        if self.backend.name == 'xcode':
            # Xcode is a bit special in that you can't (at least for the moment)
            # form a library only from object file inputs. The simple but inefficient
            # solution is to use the sources directly. This will lead to them being
            # built twice. This is unfortunate and slow, but at least it works.
            # Feel free to submit patches to get this fixed if it is an
            # issue for you.
            reuse_object_files = False
        else:
            # Objects are only shareable when the static lib is PIC too.
            reuse_object_files = pic

        if reuse_object_files:
            # Exclude sources from args and kwargs to avoid building them twice
            static_args = [args[0]]
            static_kwargs = kwargs.copy()
            static_kwargs['sources'] = []
            static_kwargs['objects'] = shared_lib.extract_all_objects()
        else:
            static_args = args
            static_kwargs = kwargs

        static_lib = self.build_target(node, static_args, static_kwargs, build.StaticLibrary)

        return build.BothLibraries(shared_lib, static_lib)
def build_library(self, node, args, kwargs):
default_library = self.coredata.get_option(OptionKey('default_library', subproject=self.subproject))
if default_library == 'shared':
return self.build_target(node, args, kwargs, build.SharedLibrary)
elif default_library == 'static':
return self.build_target(node, args, kwargs, build.StaticLibrary)
elif default_library == 'both':
return self.build_both_libraries(node, args, kwargs)
else:
raise InterpreterException(f'Unknown default_library value: {default_library}.')
    def build_target(self, node: mparser.BaseNode, args, kwargs, targetclass):
        """Common implementation behind executable(), library(), jar(), etc.

        Normalizes sources/objects/kwargs, validates them, constructs an
        instance of *targetclass* and registers it on the build.
        """
        # The decorators only run for their FeatureNew side effects; the
        # wrapped function body is a no-op.
        @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
        @FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
        @FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
        @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
        def build_target_decorator_caller(self, node, args, kwargs):
            return True

        build_target_decorator_caller(self, node, args, kwargs)

        if not args:
            raise InterpreterException('Target does not have a name.')
        # First positional arg is the name; the rest are sources.
        name, *sources = args
        for_machine = self.machine_from_native_kwarg(kwargs)
        if 'sources' in kwargs:
            sources += listify(kwargs['sources'])
        sources = self.source_strings_to_files(sources)
        objs = extract_as_list(kwargs, 'objects')
        kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
        kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
        if 'extra_files' in kwargs:
            ef = extract_as_list(kwargs, 'extra_files')
            kwargs['extra_files'] = self.source_strings_to_files(ef)
        self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
        if targetclass not in {build.Executable, build.SharedLibrary, build.SharedModule, build.StaticLibrary, build.Jar}:
            mlog.debug('Unknown target type:', str(targetclass))
            raise RuntimeError('Unreachable code')
        self.kwarg_strings_to_includedirs(kwargs)

        # Filter out kwargs from other target types. For example 'soversion'
        # passed to library() when default_library == 'static'.
        kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}

        # Split plain sources from StructuredSources; all StructuredSources
        # inputs are merged into a single object.
        srcs: T.List['SourceInputs'] = []
        struct: T.Optional[build.StructuredSources] = build.StructuredSources()
        for s in sources:
            if isinstance(s, build.StructuredSources):
                struct = struct + s
            else:
                srcs.append(s)

        if not struct:
            struct = None
        else:
            # Validate that we won't end up with two outputs with the same name.
            # i.e, don't allow:
            # [structured_sources('foo/bar.rs'), structured_sources('bar/bar.rs')]
            for v in struct.sources.values():
                outputs: T.Set[str] = set()
                for f in v:
                    o: T.List[str]
                    if isinstance(f, str):
                        o = [os.path.basename(f)]
                    elif isinstance(f, mesonlib.File):
                        o = [f.fname]
                    else:
                        o = f.get_outputs()
                    conflicts = outputs.intersection(o)
                    if conflicts:
                        raise InvalidArguments.from_node(
                            f"Conflicting sources in structured sources: {', '.join(sorted(conflicts))}",
                            node=node)
                    outputs.update(o)

        kwargs['include_directories'] = self.extract_incdirs(kwargs)
        target = targetclass(name, self.subdir, self.subproject, for_machine, srcs, struct, objs, self.environment, kwargs)
        target.project_version = self.project_version

        self.add_stdlib_info(target)
        self.add_target(name, target)
        # Declaring a target freezes project argument lists.
        self.project_args_frozen = True
        return target
    def kwarg_strings_to_includedirs(self, kwargs):
        """Convert d_import_dirs string entries into IncludeDirs objects, in place."""
        if 'd_import_dirs' in kwargs:
            items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')
            cleaned_items = []
            for i in items:
                if isinstance(i, str):
                    # BW compatibility. This was permitted so we must support it
                    # for a few releases so people can transition to "correct"
                    # path declarations.
                    if os.path.normpath(i).startswith(self.environment.get_source_dir()):
                        mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
This will become a hard error in the future.''', location=self.current_node)
                        # Rewrite the absolute path relative to the current subdir.
                        i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))
                        i = self.build_incdir_object([i])
                cleaned_items.append(i)
            kwargs['d_import_dirs'] = cleaned_items
def get_used_languages(self, target):
result = set()
for i in target.sources:
for lang, c in self.coredata.compilers[target.for_machine].items():
if c.can_compile(i):
result.add(lang)
break
return result
def add_stdlib_info(self, target):
for l in self.get_used_languages(target):
dep = self.build.stdlibs[target.for_machine].get(l, None)
if dep:
target.add_deps(dep)
def check_sources_exist(self, subdir, sources):
for s in sources:
if not isinstance(s, str):
continue # This means a generated source and they always exist.
fname = os.path.join(subdir, s)
if not os.path.isfile(fname):
raise InterpreterException(f'Tried to add non-existing source file {s}.')
    # Only permit object extraction from the same subproject
    def validate_extraction(self, buildtarget: mesonlib.HoldableObject) -> None:
        """Raise InterpreterException if *buildtarget* belongs to a different subproject."""
        if self.subproject != buildtarget.subproject:
            raise InterpreterException('Tried to extract objects from a different subproject.')
def is_subproject(self) -> bool:
return self.subproject != ''
    @typed_pos_args('set_variable', str, object)
    @noKwargs
    @noArgsFlattening
    @noSecondLevelHolderResolving
    def func_set_variable(self, node: mparser.BaseNode, args: T.Tuple[str, object], kwargs: 'TYPE_kwargs') -> None:
        """set_variable(): bind a value (holderified) to a name in interpreter scope."""
        varname, value = args
        self.set_variable(varname, value, holderify=True)
@typed_pos_args('get_variable', (str, Disabler), optargs=[object])
@noKwargs
@noArgsFlattening
@unholder_return
def func_get_variable(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, Disabler], T.Optional[object]],
kwargs: 'TYPE_kwargs') -> 'TYPE_var':
varname, fallback = args
if isinstance(varname, Disabler):
return varname
try:
return self.variables[varname]
except KeyError:
if fallback is not None:
return self._holderify(fallback)
raise InterpreterException(f'Tried to get unknown variable "{varname}".')
    @typed_pos_args('is_variable', str)
    @noKwargs
    def func_is_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> bool:
        """is_variable(): true if the name is bound in interpreter scope."""
        return args[0] in self.variables
@FeatureNew('unset_variable', '0.60.0')
@typed_pos_args('unset_variable', str)
@noKwargs
def func_unset_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> None:
varname = args[0]
try:
del self.variables[varname]
except KeyError:
raise InterpreterException(f'Tried to unset unknown variable "{varname}".')
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
native = kwargs.get('native', False)
if not isinstance(native, bool):
raise InvalidArguments('Argument to "native" must be a boolean.')
return MachineChoice.BUILD if native else MachineChoice.HOST
    @FeatureNew('is_disabler', '0.52.0')
    @typed_pos_args('is_disabler', object)
    @noKwargs
    def func_is_disabler(self, node: mparser.BaseNode, args: T.Tuple[object], kwargs: 'TYPE_kwargs') -> bool:
        """is_disabler(): true if the value is a Disabler object."""
        return isinstance(args[0], Disabler)
@noKwargs
@FeatureNew('range', '0.58.0')
@typed_pos_args('range', int, optargs=[int, int])
def func_range(self, node, args: T.Tuple[int, T.Optional[int], T.Optional[int]], kwargs: T.Dict[str, T.Any]) -> P_OBJ.RangeHolder:
start, stop, step = args
# Just like Python's range, we allow range(stop), range(start, stop), or
# range(start, stop, step)
if stop is None:
stop = start
start = 0
if step is None:
step = 1
# This is more strict than Python's range()
if start < 0:
raise InterpreterException('start cannot be negative')
if stop < start:
raise InterpreterException('stop cannot be less than start')
if step < 1:
raise InterpreterException('step must be >=1')
return P_OBJ.RangeHolder(start, stop, step, subproject=self.subproject)
| mesonbuild/interpreter/interpreter.py | 152,734 | Checks for additional invalid values in a custom_target output.
This cannot be done with typed_kwargs because it requires the number of
inputs.
Adds one additional mapping to the `holder_map`.
The intended use for this function is in the `initialize` method of
modules to register custom object holders.
Build a mapping of `HoldableObject` types to their corresponding
`ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically
holderify all returned values from methods and functions.
Just the implementation with no validation.
Lower inputs to a list of Targets and Files, replacing any strings.
:param sources: A raw (Meson DSL) list of inputs (targets, files, and
strings)
:raises InterpreterException: if any of the inputs are of an invalid type
:return: A list of Targets and Files
Copyright 2012-2021 The Meson development team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Input source types passed to Targets Input source types passed to the build.Target classes newline newline TODO: env needs reworks of the way the environment variable holder itself works probably yes, a list of empty string Subproject directory is usually the name of the subproject, but can be different for dependencies provided by wrap files. type: T.Set[str] implies self.project_args_frozen Passed from the outside, only used in subprojects. build_def_files needs to be defined before parse_project is called For non-meson subprojects, we'll be using the ast. Even if it does exist we don't want to add a dependency on it, it's autogenerated from the actual build files, and is just for reference. Re-initialize machine descriptions. We can do a better job now because we have the compilers needed to gain more knowledge, so wipe out old inference and start over. Primitives Meson types FIXME: This is special cased and not ideal: The first source is our new VapiTarget, the rest are deps Use relative path for files within source directory, and absolute path for system files. Skip files within build directory. Also skip not regular files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this is especially important to convert '/' to '\' on Windows. This is a Windows Store link which we can't resolve, so just do our best otherwise. 
check if stable module exists XXX: this is actually not helpful, since it doesn't do a version check Used by declare_dependency() and pkgconfig.generate() Executables aren't actually accepted, but we allow them here to allow for better error messages when overridden Prefer scripts in the current source directory If any file that was used as an argument to the command changes, we must re-run the configuration step. Invalid code is always an error Suppress the 'ERROR:' prefix because this exception is not fatal and VS CI treat any logs with "ERROR:" as fatal. Those lists are shared by all interpreters. That means that even if the subproject fails, any modification that the subproject made to those lists will affect the parent project. Duplicates are possible when subproject uses files from project root We always need the subi.build_def_files, to propgate sub-sub-projects Generate a meson ast and execute it with the normal do_subproject_meson Debug print the generated meson file Get class name, then option type as a string This is not a hard error to avoid dependency hell, the workaround when this happens is to simply set the subproject's option directly. The backend is already set when parsing subprojects Only init backend options on first invocation otherwise it would override values previously set from command line. This needs to be evaluated as early as possible, as meson uses this for things like deprecation testing. Do not set default_options on reconfigure otherwise it would override values previously set from command line. That means that changing default_options in a project will trigger a reconfigure but won't have any effect. If this is the first invocation we always need to initialize builtins, if this is a subproject that is new in a re-invocation we need to initialize builtins for that spdirname is the subproject_dir for this project, relative to self.subdir. self.subproject_dir is the subproject_dir for the main project, relative to top source dir. 
Load wrap files from this (sub)project. We have to activate VS before adding languages and before calling self.set_backend() otherwise it wouldn't be able to detect which vs backend version we need. But after setting default_options in case the project sets vs backend by default. absent 'native' means 'both' for backwards compatibility Add automatic 'Supbrojects' section in main project. Add automatic section with all user defined options Print all summaries, main project last. newline We'd really like to add cython's default language here, but it can't actually be done because the cython compiler hasn't been initialized, so we can't actually get the option yet. Because we can't know what compiler to add by default, and we don't want to add unnecessary compilers we don't add anything for cython here, and instead do it When the first cython target using a particular language is used. Always points to a local (i.e. self generated) file. Search for scripts relative to current subdir. Do not cache found programs because find_program('foobar') might give different results when run from different source dirs. TODO update modules to always pass `for_machine`. It is bad-form to assume the host machine. Only store successful lookups When adding kwargs, please check if they make sense in dependencies.get_dep_identifier() Replace '' by empty list of names Ensure the correct include type Cannot use the COMMAND_KW because command is allowed to be empty default regex selector for custom command: use complete output executing this cmd will fail in vcstagger.py and force to use the fallback string vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command... Don't mutate the kwargs Remap build_always to build_by_default and build_always_stale These are are nullaable so that we can know whether they're explicitly set or not. 
If they haven't been overwritten, set them to their true default name will default to first output, but we cannot do that yet because they could need substitutions (e.g. @BASENAME@) first. CustomTarget() will take care of setting a proper default but name must be an empty string in the meantime. We just need to narrow this, because the input is limited to files and Strings as inputs, so only Files will be returned Validation while creating the SymlinkData object We skip any arguments that are set to `false` Cannot use shared implementation until None backwards compat is dropped Validate input Validate output Perform the appropriate action We use absolute paths for input and output here because the cwd that the command is run from is 'unspecified', so it could change. Currently it's builddir/subdir for in_builddir else srcdir/subdir. Substitute @INPUT@, @OUTPUT@, etc here. Install file if requested, we check for the empty string for backwards compatibility. That was the behaviour before 0.45.0 so preserve it. -Wpedantic is deliberately not included, since some people want to use it but not use -Wextra see e.g. https://github.com/mesonbuild/meson/issues/3275issuecomment-641354956 https://github.com/mesonbuild/meson/issues/3742 TODO cross compilation Check that the indicated file is within the same subproject as we currently are. This is to stop people doing nasty things like: f = files('../../master_src/file.c') Note that this is validated only when the file object is generated. The result can be used in a different subproject than it is defined in (due to e.g. a declare_dependency). Grabbing files outside the source tree is ok. This is for vendor stuff like: /opt/vendorsdk/src/file_with_license_restrictions.c noqa: F811 noqa: F811 noqa: F811 To permit an executable and a shared library to have the same name, such as "foo.exe" and "libfoo.a". Check if user forces non-PIC static library. 
Xcode is a bit special in that you can't (at least for the moment) form a library only from object file inputs. The simple but inefficient solution is to use the sources directly. This will lead to them being built twice. This is unfortunate and slow, but at least it works. Feel free to submit patches to get this fixed if it is an issue for you. Exclude sources from args and kwargs to avoid building them twice Filter out kwargs from other target types. For example 'soversion' passed to library() when default_library == 'static'. Validate that we won't end up with two outputs with the same name. i.e, don't allow: [structured_sources('foo/bar.rs'), structured_sources('bar/bar.rs')] BW compatibility. This was permitted so we must support it for a few releases so people can transition to "correct" path declarations. This means a generated source and they always exist. Only permit object extraction from the same subproject Just like Python's range, we allow range(stop), range(start, stop), or range(start, stop, step) This is more strict than Python's range() | 9,602 | en | 0.874736 |
# -*- coding: utf-8 -*-
"""
.. _training-example:
Train Your Own Neural Network Potential
=======================================
This example shows how to use TorchANI to train a neural network potential
with the setup identical to NeuroChem. We will use the same configuration as
specified in `inputtrain.ipt`_
.. _`inputtrain.ipt`:
https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/inputtrain.ipt
.. note::
TorchANI provide tools to run NeuroChem training config file `inputtrain.ipt`.
See: :ref:`neurochem-training`.
"""
###############################################################################
# To begin with, let's first import the modules and setup devices we will use:
import torch
import torchani
import os
import math
import torch.utils.tensorboard
import tqdm
# helper function to convert energy unit from Hartree to kcal/mol
from torchani.units import hartree2kcalmol
# device to run the training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
###############################################################################
# Now let's setup constants and construct an AEV computer. These numbers could
# be found in `rHCNO-5.2R_16-3.5A_a4-8.params`
# The atomic self energies given in `sae_linfit.dat`_ are computed from ANI-1x
# dataset. These constants can be calculated for any given dataset if ``None``
# is provided as an argument to the object of :class:`EnergyShifter` class.
#
# .. note::
#
# Besides defining these hyperparameters programmatically,
# :mod:`torchani.neurochem` provide tools to read them from file.
#
# .. _rHCNO-5.2R_16-3.5A_a4-8.params:
# https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/rHCNO-5.2R_16-3.5A_a4-8.params
# .. _sae_linfit.dat:
# https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/sae_linfit.dat
# AEV hyperparameters taken from rHCNO-5.2R_16-3.5A_a4-8.params (see links above).
Rcr = 5.2000e+00  # radial term cutoff
Rca = 3.5000e+00  # angular term cutoff
EtaR = torch.tensor([1.6000000e+01], device=device)
ShfR = torch.tensor([9.0000000e-01, 1.1687500e+00, 1.4375000e+00, 1.7062500e+00, 1.9750000e+00, 2.2437500e+00, 2.5125000e+00, 2.7812500e+00, 3.0500000e+00, 3.3187500e+00, 3.5875000e+00, 3.8562500e+00, 4.1250000e+00, 4.3937500e+00, 4.6625000e+00, 4.9312500e+00], device=device)
Zeta = torch.tensor([3.2000000e+01], device=device)
ShfZ = torch.tensor([1.9634954e-01, 5.8904862e-01, 9.8174770e-01, 1.3744468e+00, 1.7671459e+00, 2.1598449e+00, 2.5525440e+00, 2.9452431e+00], device=device)
EtaA = torch.tensor([8.0000000e+00], device=device)
ShfA = torch.tensor([9.0000000e-01, 1.5500000e+00, 2.2000000e+00, 2.8500000e+00], device=device)
num_species = 4  # H, C, N, O (see species_to_tensor below)
aev_computer = torchani.AEVComputer(Rcr, Rca, EtaR, ShfR, EtaA, Zeta, ShfA, ShfZ, num_species)
# Passing None makes the shifter fit self energies from the dataset itself.
energy_shifter = torchani.utils.EnergyShifter(None)
species_to_tensor = torchani.utils.ChemicalSymbolsToInts(['H', 'C', 'N', 'O'])
###############################################################################
# Now let's setup datasets. These paths assumes the user run this script under
# the ``examples`` directory of TorchANI's repository. If you download this
# script, you should manually set the path of these files in your system before
# this script can run successfully.
#
# Also note that we need to subtracting energies by the self energies of all
# atoms for each molecule. This makes the range of energies in a reasonable
# range. The second argument defines how to convert species as a list of string
# to tensor, that is, for all supported chemical symbols, which is correspond to
# ``0``, which correspond to ``1``, etc.
try:
    path = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # __file__ is undefined in interactive sessions; fall back to the cwd.
    path = os.getcwd()
dspath = os.path.join(path, '../dataset/ani1-up_to_gdb4/ani_gdb_s01.h5')
batch_size = 2560
# Pipeline: subtract per-atom self energies, map species symbols to integer
# indices, shuffle, then split 80/20 into training and validation.
training, validation = torchani.data.load(dspath).subtract_self_energies(energy_shifter).species_to_indices().shuffle().split(0.8, None)
training = training.collate(batch_size).cache()
validation = validation.collate(batch_size).cache()
print('Self atomic energies: ', energy_shifter.self_energies)
###############################################################################
# When iterating the dataset, we will get a dict of name->property mapping
#
###############################################################################
# Now let's define atomic neural networks.
def _atomic_network(*layer_sizes):
    """Build one per-element MLP.

    Linear layers of the given sizes are joined by CELU(0.1) activations,
    with no activation after the final (scalar output) layer.

    layer_sizes: input width (384 = AEV length), hidden widths, then 1.
    """
    layers = []
    for dim_in, dim_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        layers.append(torch.nn.Linear(dim_in, dim_out))
        layers.append(torch.nn.CELU(0.1))
    del layers[-1]  # drop the activation after the output layer
    return torch.nn.Sequential(*layers)

# One network per element; hidden widths follow the NeuroChem setup.
# The Sequential layout stays Linear/CELU pairs (Linear at indices 0/2/4/6),
# because the optimizer setup below addresses layers by index.
H_network = _atomic_network(384, 160, 128, 96, 1)
C_network = _atomic_network(384, 144, 112, 96, 1)
N_network = _atomic_network(384, 128, 112, 96, 1)
O_network = _atomic_network(384, 128, 112, 96, 1)
# One sub-network per element, ordered to match the species indices (H, C, N, O).
nn = torchani.ANIModel([H_network, C_network, N_network, O_network])
print(nn)
###############################################################################
# Initialize the weights and biases.
#
# .. note::
# Pytorch default initialization for the weights and biases in linear layers
# is Kaiming uniform. See: `TORCH.NN.MODULES.LINEAR`_
# We initialize the weights similarly but from the normal distribution.
# The biases were initialized to zero.
#
# .. _TORCH.NN.MODULES.LINEAR:
# https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
def init_params(m):
    """Initialize a Linear layer: Kaiming-normal weights, zero biases.

    Non-Linear modules are left untouched, so this is safe to pass to
    ``Module.apply``.
    """
    if not isinstance(m, torch.nn.Linear):
        return
    torch.nn.init.kaiming_normal_(m.weight, a=1.0)
    torch.nn.init.zeros_(m.bias)
# Re-initialize every Linear layer before training starts.
nn.apply(init_params)
###############################################################################
# Let's now create a pipeline of AEV Computer --> Neural Networks.
model = torchani.nn.Sequential(aev_computer, nn).to(device)
###############################################################################
# Now let's setup the optimizers. NeuroChem uses Adam with decoupled weight decay
# to updates the weights and Stochastic Gradient Descent (SGD) to update the biases.
# Moreover, we need to specify different weight decay rate for different layes.
#
# .. note::
#
# The weight decay in `inputtrain.ipt`_ is named "l2", but it is actually not
# L2 regularization. The confusion between L2 and weight decay is a common
# mistake in deep learning. See: `Decoupled Weight Decay Regularization`_
# Also note that the weight decay only applies to weight in the training
# of ANI models, not bias.
#
# .. _Decoupled Weight Decay Regularization:
# https://arxiv.org/abs/1711.05101
# Per-layer weight-decay schedule for every atomic network's Linear layers
# (Sequential indices 0/2/4/6): no decay on the first and last layers,
# 1e-5 and 1e-6 on the middle two.  Biases are handled by plain SGD below.
AdamW = torchani.optim.AdamW([
    dict({'params': [net[idx].weight]}, **extra)
    for net in (H_network, C_network, N_network, O_network)
    for idx, extra in ((0, {}),
                       (2, {'weight_decay': 0.00001}),
                       (4, {'weight_decay': 0.000001}),
                       (6, {}))
])
# Plain SGD (lr 1e-3, no weight decay) trains only the biases; one param
# group per Linear layer of each atomic network, in the same order as the
# literal listing above (H, C, N, O; indices 0, 2, 4, 6).
SGD = torch.optim.SGD([
    {'params': [net[idx].bias]}
    for net in (H_network, C_network, N_network, O_network)
    for idx in (0, 2, 4, 6)
], lr=1e-3)
###############################################################################
# Setting up a learning rate scheduler to do learning rate decay
# Halve each optimizer's LR after 100 epochs without validation improvement.
AdamW_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(AdamW, factor=0.5, patience=100, threshold=0)
SGD_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(SGD, factor=0.5, patience=100, threshold=0)
###############################################################################
# Train the model by minimizing the MSE loss, until validation RMSE no longer
# improves during a certain number of steps, decay the learning rate and repeat
# the same process, stop until the learning rate is smaller than a threshold.
#
# We first read the checkpoint files to restart training. We use `latest.pt`
# to store current training state.
latest_checkpoint = 'latest.pt'
###############################################################################
# Resume training from previously saved checkpoints:
if os.path.isfile(latest_checkpoint):
    # Restore model weights and optimizer/scheduler state in lockstep so the
    # run continues exactly where it stopped.
    checkpoint = torch.load(latest_checkpoint)
    nn.load_state_dict(checkpoint['nn'])
    AdamW.load_state_dict(checkpoint['AdamW'])
    SGD.load_state_dict(checkpoint['SGD'])
    AdamW_scheduler.load_state_dict(checkpoint['AdamW_scheduler'])
    SGD_scheduler.load_state_dict(checkpoint['SGD_scheduler'])
###############################################################################
# During training, we need to validate on validation set and if validation error
# is better than the best, then save the new best model to a checkpoint
def validate():
    """Return the model's RMSE over the validation set, in kcal/mol.

    Accumulates the summed squared error and molecule count across all
    validation batches, then converts the Hartree RMSE to kcal/mol.
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    squared_error_total = 0.0
    molecule_count = 0
    for batch in validation:
        species = batch['species'].to(device)
        coordinates = batch['coordinates'].to(device).float()
        target_energies = batch['energies'].to(device).float()
        _, predicted = model((species, coordinates))
        squared_error_total += loss_fn(predicted, target_energies).item()
        molecule_count += predicted.shape[0]
    return hartree2kcalmol(math.sqrt(squared_error_total / molecule_count))
###############################################################################
# We will also use TensorBoard to visualize our training process
tensorboard = torch.utils.tensorboard.SummaryWriter()
###############################################################################
# Finally, we come to the training loop.
#
# In this tutorial, we are setting the maximum epoch to a very small number,
# only to make this demo terminate fast. For serious training, this should be
# set to a much larger value
mse = torch.nn.MSELoss(reduction='none')
print("training starting from epoch", AdamW_scheduler.last_epoch + 1)
max_epochs = 10
early_stopping_learning_rate = 1.0E-5
best_model_checkpoint = 'best.pt'
for _ in range(AdamW_scheduler.last_epoch + 1, max_epochs):
    rmse = validate()
    print('RMSE:', rmse, 'at epoch', AdamW_scheduler.last_epoch + 1)
    learning_rate = AdamW.param_groups[0]['lr']
    # Stop once plateau-driven decay has pushed the LR below the threshold.
    if learning_rate < early_stopping_learning_rate:
        break
    # checkpoint: save the best-so-far weights only
    if AdamW_scheduler.is_better(rmse, AdamW_scheduler.best):
        torch.save(nn.state_dict(), best_model_checkpoint)
    AdamW_scheduler.step(rmse)
    SGD_scheduler.step(rmse)
    tensorboard.add_scalar('validation_rmse', rmse, AdamW_scheduler.last_epoch)
    tensorboard.add_scalar('best_validation_rmse', AdamW_scheduler.best, AdamW_scheduler.last_epoch)
    tensorboard.add_scalar('learning_rate', learning_rate, AdamW_scheduler.last_epoch)
    for i, properties in tqdm.tqdm(
        enumerate(training),
        total=len(training),
        desc="epoch {}".format(AdamW_scheduler.last_epoch)
    ):
        species = properties['species'].to(device)
        coordinates = properties['coordinates'].to(device).float()
        true_energies = properties['energies'].to(device).float()
        # Padding atoms have species index -1, so this counts real atoms only.
        num_atoms = (species >= 0).sum(dim=1, dtype=true_energies.dtype)
        _, predicted_energies = model((species, coordinates))
        # Per-molecule squared error is scaled by 1/sqrt(num_atoms) before
        # averaging over the batch.
        loss = (mse(predicted_energies, true_energies) / num_atoms.sqrt()).mean()
        AdamW.zero_grad()
        SGD.zero_grad()
        loss.backward()
        AdamW.step()
        SGD.step()
        # write current batch loss to TensorBoard
        tensorboard.add_scalar('batch_loss', loss, AdamW_scheduler.last_epoch * len(training) + i)
    # Persist the full training state every epoch so runs can resume here.
    torch.save({
        'nn': nn.state_dict(),
        'AdamW': AdamW.state_dict(),
        'SGD': SGD.state_dict(),
        'AdamW_scheduler': AdamW_scheduler.state_dict(),
        'SGD_scheduler': SGD_scheduler.state_dict(),
    }, latest_checkpoint)
| examples/nnp_training.py | 13,227 | .. _training-example:
Train Your Own Neural Network Potential
=======================================
This example shows how to use TorchANI to train a neural network potential
with the setup identical to NeuroChem. We will use the same configuration as
specified in `inputtrain.ipt`_
.. _`inputtrain.ipt`:
https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/inputtrain.ipt
.. note::
TorchANI provide tools to run NeuroChem training config file `inputtrain.ipt`.
See: :ref:`neurochem-training`.
-*- coding: utf-8 -*- To begin with, let's first import the modules and setup devices we will use: helper function to convert energy unit from Hartree to kcal/mol device to run the training Now let's setup constants and construct an AEV computer. These numbers could be found in `rHCNO-5.2R_16-3.5A_a4-8.params` The atomic self energies given in `sae_linfit.dat`_ are computed from ANI-1x dataset. These constants can be calculated for any given dataset if ``None`` is provided as an argument to the object of :class:`EnergyShifter` class. .. note:: Besides defining these hyperparameters programmatically, :mod:`torchani.neurochem` provide tools to read them from file. .. _rHCNO-5.2R_16-3.5A_a4-8.params: https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/rHCNO-5.2R_16-3.5A_a4-8.params .. _sae_linfit.dat: https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/sae_linfit.dat Now let's setup datasets. These paths assumes the user run this script under the ``examples`` directory of TorchANI's repository. If you download this script, you should manually set the path of these files in your system before this script can run successfully. Also note that we need to subtracting energies by the self energies of all atoms for each molecule. This makes the range of energies in a reasonable range. The second argument defines how to convert species as a list of string to tensor, that is, for all supported chemical symbols, which is correspond to ``0``, which correspond to ``1``, etc. When iterating the dataset, we will get a dict of name->property mapping Now let's define atomic neural networks. Initialize the weights and biases. .. note:: Pytorch default initialization for the weights and biases in linear layers is Kaiming uniform. See: `TORCH.NN.MODULES.LINEAR`_ We initialize the weights similarly but from the normal distribution. The biases were initialized to zero. .. 
_TORCH.NN.MODULES.LINEAR: https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.htmlLinear Let's now create a pipeline of AEV Computer --> Neural Networks. Now let's setup the optimizers. NeuroChem uses Adam with decoupled weight decay to updates the weights and Stochastic Gradient Descent (SGD) to update the biases. Moreover, we need to specify different weight decay rate for different layes. .. note:: The weight decay in `inputtrain.ipt`_ is named "l2", but it is actually not L2 regularization. The confusion between L2 and weight decay is a common mistake in deep learning. See: `Decoupled Weight Decay Regularization`_ Also note that the weight decay only applies to weight in the training of ANI models, not bias. .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 H networks C networks N networks O networks H networks C networks N networks O networks Setting up a learning rate scheduler to do learning rate decay Train the model by minimizing the MSE loss, until validation RMSE no longer improves during a certain number of steps, decay the learning rate and repeat the same process, stop until the learning rate is smaller than a threshold. We first read the checkpoint files to restart training. We use `latest.pt` to store current training state. Resume training from previously saved checkpoints: During training, we need to validate on validation set and if validation error is better than the best, then save the new best model to a checkpoint run validation We will also use TensorBoard to visualize our training process Finally, we come to the training loop. In this tutorial, we are setting the maximum epoch to a very small number, only to make this demo terminate fast. For serious training, this should be set to a much larger value checkpoint write current batch loss to TensorBoard | 4,336 | en | 0.843918 |
""" A QuoteController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.Quote import Quote
class QuoteController(Controller):
    """CRUD endpoints for Quote resources."""

    def __init__(self, request: Request):
        self.request = request

    def show(self):
        """Return the quote matching the ``id`` route parameter."""
        return Quote.find(self.request.param("id"))

    def index(self):
        """Return every quote."""
        return Quote.all()

    def create(self):
        """Create a quote from the ``subject`` input and return it."""
        return Quote.create({"subject": self.request.input("subject")})

    def update(self):
        """Update the ``subject`` of the quote given by ``id``; return it."""
        quote_id = self.request.param("id")
        Quote.where("id", quote_id).update(
            {"subject": self.request.input("subject")})
        return Quote.where("id", quote_id).get()

    def destroy(self):
        """Delete the quote given by ``id``; return the deleted record(s)."""
        quote_id = self.request.param("id")
        deleted = Quote.where("id", quote_id).get()
        Quote.where("id", quote_id).delete()
        return deleted
| app/http/controllers/QuoteController.py | 910 | A QuoteController Module | 24 | en | 0.083958 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from .chromatic import *
| enterprise_extensions/chromatic/__init__.py | 159 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""
Component to interface with various locks that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/lock/
"""
import asyncio
from datetime import timedelta
import functools as ft
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, STATE_LOCKED, STATE_UNLOCKED,
STATE_UNKNOWN, SERVICE_LOCK, SERVICE_UNLOCK)
from homeassistant.components import group
DOMAIN = 'lock'
SCAN_INTERVAL = timedelta(seconds=30)
ATTR_CHANGED_BY = 'changed_by'
GROUP_NAME_ALL_LOCKS = 'all locks'
ENTITY_ID_ALL_LOCKS = group.ENTITY_ID_FORMAT.format('all_locks')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
# Service payload: an optional entity id list plus an optional lock code.
LOCK_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Optional(ATTR_CODE): cv.string,
})
_LOGGER = logging.getLogger(__name__)
def is_locked(hass, entity_id=None):
    """Return if the lock is locked based on the statemachine.

    Falls back to the all-locks group entity when no entity_id is given.
    """
    return hass.states.is_state(entity_id or ENTITY_ID_ALL_LOCKS, STATE_LOCKED)
def lock(hass, entity_id=None, code=None):
    """Lock all or specified locks, optionally passing an unlock code."""
    service_data = {
        key: value
        for key, value in ((ATTR_CODE, code), (ATTR_ENTITY_ID, entity_id))
        if value
    }
    hass.services.call(DOMAIN, SERVICE_LOCK, service_data)
def unlock(hass, entity_id=None, code=None):
    """Unlock all or specified locks, optionally passing an unlock code."""
    service_data = {
        key: value
        for key, value in ((ATTR_CODE, code), (ATTR_ENTITY_ID, entity_id))
        if value
    }
    hass.services.call(DOMAIN, SERVICE_UNLOCK, service_data)
@asyncio.coroutine
def async_setup(hass, config):
    """Track states and offer events for locks."""
    component = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
    yield from component.async_setup(config)
    @asyncio.coroutine
    def async_handle_lock_service(service):
        """Handle calls to the lock services."""
        target_locks = component.async_extract_from_service(service)
        code = service.data.get(ATTR_CODE)
        # Dispatch lock/unlock to every targeted entity first...
        for entity in target_locks:
            if service.service == SERVICE_LOCK:
                yield from entity.async_lock(code=code)
            else:
                yield from entity.async_unlock(code=code)
        # ...then refresh state.  Entities with a coroutine async_update are
        # collected and awaited together; the rest are awaited one by one.
        update_tasks = []
        for entity in target_locks:
            if not entity.should_poll:
                continue
            update_coro = hass.loop.create_task(
                entity.async_update_ha_state(True))
            if hasattr(entity, 'async_update'):
                update_tasks.append(update_coro)
            else:
                yield from update_coro
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)
    # Service descriptions come from a YAML file; load it in the executor so
    # the blocking file IO stays off the event loop.
    descriptions = yield from hass.loop.run_in_executor(
        None, load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml'))
    hass.services.async_register(
        DOMAIN, SERVICE_UNLOCK, async_handle_lock_service,
        descriptions.get(SERVICE_UNLOCK), schema=LOCK_SERVICE_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_LOCK, async_handle_lock_service,
        descriptions.get(SERVICE_LOCK), schema=LOCK_SERVICE_SCHEMA)
    return True
class LockDevice(Entity):
    """Representation of a lock.

    Platform integrations subclass this and implement at least lock(),
    unlock() and the is_locked property.
    """
    @property
    def changed_by(self):
        """Last change triggered by."""
        return None
    # pylint: disable=no-self-use
    @property
    def code_format(self):
        """Regex for code format or None if no code is required."""
        return None
    @property
    def is_locked(self):
        """Return true if the lock is locked."""
        return None
    def lock(self, **kwargs):
        """Lock the lock."""
        raise NotImplementedError()
    def async_lock(self, **kwargs):
        """Lock the lock.
        This method must be run in the event loop and returns a coroutine.
        """
        # Run the (potentially blocking) sync implementation in the executor
        # so the event loop is never blocked.
        return self.hass.loop.run_in_executor(
            None, ft.partial(self.lock, **kwargs))
    def unlock(self, **kwargs):
        """Unlock the lock."""
        raise NotImplementedError()
    def async_unlock(self, **kwargs):
        """Unlock the lock.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.loop.run_in_executor(
            None, ft.partial(self.unlock, **kwargs))
    @property
    def state_attributes(self):
        """Return the state attributes."""
        # Locks that need no code expose no extra attributes at all.
        if self.code_format is None:
            return None
        state_attr = {
            ATTR_CODE_FORMAT: self.code_format,
            ATTR_CHANGED_BY: self.changed_by
        }
        return state_attr
    @property
    def state(self):
        """Return the state."""
        locked = self.is_locked
        if locked is None:
            return STATE_UNKNOWN
        return STATE_LOCKED if locked else STATE_UNLOCKED
| homeassistant/components/lock/__init__.py | 5,326 | Representation of a lock.
Handle calls to the lock services.
Lock the lock.
This method must be run in the event loop and returns a coroutine.
Track states and offer events for locks.
Unlock the lock.
This method must be run in the event loop and returns a coroutine.
Last change triggered by.
Regex for code format or None if no code is required.
Return if the lock is locked based on the statemachine.
Return true if the lock is locked.
Lock all or specified locks.
Lock the lock.
Return the state.
Return the state attributes.
Unlock all or specified locks.
Unlock the lock.
Component to interface with various locks that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/lock/
noqa pylint: disable=no-self-use | 809 | en | 0.870836 |
"""
Created on Sun Feb 2 13:28:48 2020
@author: matias
"""
import numpy as np
from numpy.linalg import inv
from matplotlib import pyplot as plt
import time
import camb
from scipy.integrate import cumtrapz as cumtrapz
from scipy.integrate import simps as simps
from scipy.interpolate import interp1d
from scipy.constants import c as c_luz #metros/segundos
c_luz_km = c_luz/1000
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_int import Hubble_teorico
from funciones_AGN import zs_2_logDlH0
#%%
'''
DEPRECATED: Antes de eliminar este archivo copiar este ejemplo en otro .py
en donde se grafiquen los datos.
'''
if __name__ == '__main__':
    from scipy.constants import c as c_luz  # meters/second
    from matplotlib import pyplot as plt
    import sys
    import os
    from os.path import join as osjoin
    from pc_path import definir_path
    path_git, path_datos_global = definir_path()
    os.chdir(path_git)
    sys.path.append('./Software/Funcionales/')
    from funciones_data import leer_data_AGN
    #%%
    # NOTE(review): this local definition shadows the leer_data_AGN imported
    # just above -- presumably intentional in this deprecated example.
    def leer_data_AGN(archivo_AGN):
        """Read the AGN table; return (z, Fuv, eFuv, Fx, eFx) sorted by z."""
        z, Fuv, eFuv, Fx, eFx = np.loadtxt(archivo_AGN,
                        usecols=(3,4,5,6,7), unpack=True)
        arr1inds = z.argsort()
        sorted_z = z[arr1inds]
        sorted_Fuv = Fuv[arr1inds]
        sorted_eFuv = eFuv[arr1inds]
        sorted_Fx = Fx[arr1inds]
        sorted_eFx = eFx[arr1inds]
        return sorted_z, sorted_Fuv, sorted_eFuv, sorted_Fx, sorted_eFx
    # AGN data
    os.chdir(path_git+'/Software/Estadística/Datos/Datos_AGN')
    data_agn = leer_data_AGN('table3.dat')
    H_0 = 70
    omega_m = 0.99
    gamma = 0.64
    beta = 7
    delta = 0.3
    theta = [omega_m,beta,gamma,delta]
    #params_to_chi2_AGN_nuisance(theta, _, data_agn)/(len(z_data)-4)
    data_agn = leer_data_AGN('table3.dat')
    z_data_1, logFuv_1, eFuv_1, logFx_1, eFx_1 = data_agn
    # Keep only points inside the (zmin, zmax) redshift window.
    zmin = 0
    zmax = 100
    mask = (z_data_1 > zmin) & (z_data_1 < zmax)
    z_data = z_data_1[mask]
    logFuv = logFuv_1[mask]
    logFx = logFx_1[mask]
    eFx = eFx_1[mask]
    eFuv = eFuv_1[mask]
    # Theoretical log10 luminosity distance, then converted to cm.
    zs_modelo = np.linspace(0,30,10**6)
    Dl_teo = -np.log10(H_0) + zs_2_logDlH0(zs_modelo,omega_m,z_data)
    Dl_teo_cm = Dl_teo - np.log10(3.24) + 25
    psi = beta + gamma * logFuv + 2 * (gamma-1) * (Dl_teo_cm + 0.5 * np.log10(4*np.pi))
    si_2 = eFx**2 + (gamma * eFuv)**2 + np.exp(2*np.log(delta))  # the squared errors
    #si_2 = eFx**2 + (gamma * eFuv)**2 + delta**2  # the squared errors
    print(np.sum(si_2))
    chi2_AGN = np.sum( ((logFx-psi)**2/si_2) + np.log(2*np.pi*si_2))  # minus sign in the paper
    print(chi2_AGN)
    print(chi2_AGN/(len(z_data)-4))
    plt.figure()
    plt.xlabel('z (redshift)')
    plt.ylabel(r'$Fx$')
    plt.errorbar(z_data,psi,np.sqrt(si_2),marker='.',linestyle='')
    plt.plot(z_data,logFx,'.r')
| Software/Funcionales/funciones_LambdaCDM_AGN.py | 2,971 | Created on Sun Feb 2 13:28:48 2020
@author: matias
metros/segundos%%metros/segundos%%Data AGNparams_to_chi2_AGN_nuisance(theta, _, data_agn)/(len(z_data)-4)El cuadrado de los erroressi_2 = eFx**2 + (gamma * eFuv)**2 + delta**2 El cuadrado de los errores menos en el paper | 274 | es | 0.432128 |
# mypy: allow-untyped-defs
import subprocess
from functools import partial
from typing import Callable
from mozlog import get_default_logger
from wptserve.utils import isomorphic_decode
logger = None
def vcs(bin_name: str) -> Callable[..., None]:
    """Build a wrapper that runs *bin_name* subcommands via subprocess.

    The returned callable takes the subcommand name plus positional args,
    and the keyword options ``repo`` (used as the cwd for the call),
    ``log_error``, ``stdout`` and ``stdin``.  Output is captured and
    returned unless an explicit ``stdout`` is supplied.
    """
    def inner(command, *args, **kwargs):
        # Lazily grab the shared mozlog logger on first use.
        global logger
        if logger is None:
            logger = get_default_logger("vcs")
        repo = kwargs.pop("repo", None)
        log_error = kwargs.pop("log_error", True)
        stdout = kwargs.pop("stdout", None)
        stdin = kwargs.pop("stdin", None)
        if kwargs:
            # Any keyword left over after popping the known ones is a typo.
            raise TypeError(kwargs)
        args = list(args)
        proc_kwargs = {}
        if repo is not None:
            # Make sure `cwd` is str type to work in different sub-versions of Python 3.
            # Before 3.8, bytes were not accepted on Windows for `cwd`.
            proc_kwargs["cwd"] = isomorphic_decode(repo)
        if stdout is not None:
            proc_kwargs["stdout"] = stdout
        if stdin is not None:
            proc_kwargs["stdin"] = stdin
        command_line = [bin_name, command] + args
        logger.debug(" ".join(command_line))
        try:
            # check_output captures the output; with a caller-provided stdout
            # only check_call's return-code check is needed.
            func = subprocess.check_output if not stdout else subprocess.check_call
            return func(command_line, stderr=subprocess.STDOUT, **proc_kwargs)
        except OSError as e:
            if log_error:
                logger.error(e)
            raise
        except subprocess.CalledProcessError as e:
            if log_error:
                logger.error(e.output)
            raise
    return inner
# Pre-built wrappers for the two supported version control binaries.
git = vcs("git")
hg = vcs("hg")
def bind_to_repo(vcs_func, repo, log_error=True):
    """Return *vcs_func* with its repo (and error logging flag) pre-bound."""
    bound = partial(vcs_func, repo=repo, log_error=log_error)
    return bound
def is_git_root(path, log_error=True):
    """True if *path* is the top level of a git checkout.

    ``git rev-parse --show-cdup`` prints an empty line at the repo root;
    a non-zero exit (not a repo at all) maps to False.
    """
    try:
        return git("rev-parse", "--show-cdup", repo=path,
                   log_error=log_error) == b"\n"
    except subprocess.CalledProcessError:
        return False
| tools/wptrunner/wptrunner/vcs.py | 1,954 | mypy: allow-untyped-defs Make sure `cwd` is str type to work in different sub-versions of Python 3. Before 3.8, bytes were not accepted on Windows for `cwd`. | 157 | en | 0.947139 |
import socket
import struct
import os
PACKET_SIZE = 1024
TIME_OUT = 5
SUCCESS = b'File Has Been Transferred'
def getPayload(fileName):
    """Read the whole file as bytes and return it.

    Exits the program (with a message) if the file is missing or empty.
    """
    try:
        # "rb", not "r+b": we only read, so opening read-write would
        # needlessly require write permission on the payload file.
        with open(file=fileName, mode="rb") as readFile:
            payload = readFile.read()
    except FileNotFoundError:
        print("\nNo payload file.\nProgram now exiting ...")
        exit()
    if len(payload) == 0:
        print("That is a blank file.\nProgram now exiting ...")
        exit()
    return payload
def main():
    """Prompt for a file, server IP and port, then send the file over TCP.

    Wire format: 4-byte packet count, 4-byte filename length, the UTF-8
    filename bytes, then the raw file payload.  Waits for a one-packet
    status reply from the server before exiting.
    """
    fileName = input("Enter path of the file to be sent to the server:\n")
    payload = getPayload(fileName=fileName)
    print("File Found ...")
    serverIP = input("\nEnter the IP Address of the server:\n")
    # input() never returns None, so test for the empty string instead
    # (the original `is None` check could never trigger).
    if not serverIP:
        print("Cannot leave server IP address blank.\nProgram now exiting ...")
        exit()
    try:
        serverPort = int(input("\nEnter the Port of the server:\n"))
    except ValueError:
        print("Please provide a valid port number. Should only contain character 0-9.\nProgram now exiting ...")
        exit()
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(TIME_OUT)
            sock.connect((serverIP, serverPort))
            print("\nTransferring File ...")
            # Header: <num packets><filename length><filename bytes>
            name = fileName.split("/")[-1]
            nameBytes = name.encode("utf-8")
            nameLength = len(nameBytes)
            nameSizeBytes = struct.pack("I", nameLength)
            # Total length includes the two 4-byte header fields + the name.
            payloadLength = len(payload) + 8 + nameLength
            numPackets = (payloadLength // PACKET_SIZE)
            if (payloadLength / PACKET_SIZE) > numPackets:
                numPackets += 1
            packedNumBytes = struct.pack('I', numPackets)
            header = packedNumBytes + nameSizeBytes + nameBytes
            sock.sendall(header + payload)
            data = sock.recv(PACKET_SIZE)
            print("\nStatus:")
            print(data.decode("utf-8"))
            print("\nProgram done ...")
    # Fixed: `except A or B` only caught ConnectionRefusedError; a tuple
    # catches both connection errors as intended.
    except (ConnectionRefusedError, ConnectionResetError) as e:
        print(f"\n{e} Error Occurred. Check for correct server IP Address and Ports. Check server status.\nProgram now exiting ...")
    except Exception as e:
        print(f"\n{e} error has broken things.")
if __name__ == '__main__':
    # NOTE(review): 'clear' assumes a POSIX shell -- it fails silently on
    # Windows (where the equivalent is 'cls').
    os.system("clear")
    main()
| Labs/Lab_4/Lab_4/client.py | 2,614 | fileName = "test.txt" serverIP = "127.0.0.1" serverPort = 5005 | 62 | en | 0.701525 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
import re
from sqlalchemy.orm import validates
from ggrc import db
from ggrc import settings
from ggrc.models.computed_property import computed_property
from ggrc.models.context import HasOwnContext
from ggrc.models.exceptions import ValidationError
from ggrc.models.mixins import deferred, Base, CustomAttributable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.utils import validate_option
class Person(CustomAttributable, HasOwnContext, Relatable, Base, db.Model):
  """A person/user record; email is the unique natural key (see the
  uq_people_email index below)."""
  __tablename__ = 'people'
  # Core identity columns; all deferred so default queries stay light.
  email = deferred(db.Column(db.String, nullable=False), 'Person')
  name = deferred(db.Column(db.String), 'Person')
  language_id = deferred(db.Column(db.Integer), 'Person')
  company = deferred(db.Column(db.String), 'Person')
  # Reverse relationships; dependent rows are removed with the person.
  object_people = db.relationship(
      'ObjectPerson', backref='person', cascade='all, delete-orphan')
  object_owners = db.relationship(
      'ObjectOwner', backref='person', cascade='all, delete-orphan')
  # The person's preferred language, stored as an Option row with the
  # "person_language" role.
  language = db.relationship(
      'Option',
      primaryjoin='and_(foreign(Person.language_id) == Option.id, '
      'Option.role == "person_language")',
      uselist=False,
  )
  @staticmethod
  def _extra_table_args(cls):
    # Composite name/email index for lookups plus a unique email constraint.
    return (
        db.Index('ix_people_name_email', 'name', 'email'),
        db.Index('uq_people_email', 'email', unique=True),
    )
  # Fields included in full-text search indexing.
  _fulltext_attrs = [
      'company',
      'email',
      'name',
  ]
  # Fields exposed through the JSON API (PublishOnly entries are read-only).
  _publish_attrs = [
      'company',
      'email',
      'language',
      'name',
      PublishOnly('object_people'),
      PublishOnly('system_wide_role'),
  ]
  # Free-text fields that are HTML-sanitized on write.
  _sanitize_html = [
      'company',
      'name',
  ]
  _include_links = []
  # Import/export column aliases.
  _aliases = {
      "name": "Name",
      "email": {
          "display_name": "Email",
          "unique": True,
      },
      "company": "Company",
      "user_role": {
          "display_name": "Role",
          "type": "user_role",
          "filter_by": "_filter_by_user_role",
      },
  }
@classmethod
def _filter_by_user_role(cls, predicate):
from ggrc_basic_permissions.models import Role, UserRole
return UserRole.query.join(Role).filter(
(UserRole.person_id == cls.id) &
(UserRole.context_id == None) &
predicate(Role.name)
).exists()
  # Methods required by Flask-Login
  def is_authenticated(self):
    """Flask-Login hook: every persisted person counts as authenticated."""
    return True

  def is_active(self):
    """Flask-Login hook: treat every account as active."""
    return True  # self.active

  def is_anonymous(self):
    """Flask-Login hook: persisted people are never anonymous."""
    return False

  def get_id(self):
    """Flask-Login hook: unique id for session storage (stringified PK)."""
    return unicode(self.id)  # noqa
@validates('language')
def validate_person_options(self, key, option):
return validate_option(self.__class__.__name__, key, option,
'person_language')
@validates('email')
def validate_email(self, key, email):
if not Person.is_valid_email(email):
message = "Must provide a valid email address"
raise ValidationError(message)
return email
@staticmethod
def is_valid_email(val):
# Borrowed from Django
# literal form, ipv4 address (SMTP 4.1.3)
email_re = re.compile(
'^[-!#$%&\'*+\\.\/0-9=?A-Z^_`{|}~]+@([-0-9A-Z]+\.)+([0-9A-Z]){2,4}$',
re.IGNORECASE)
return email_re.match(val) if val else False
@classmethod
def eager_query(cls):
from sqlalchemy import orm
# query = super(Person, cls).eager_query()
# Completely overriding eager_query to avoid eager loading of the
# modified_by relationship
return super(Person, cls).eager_query().options(
orm.joinedload('language'),
orm.subqueryload('object_people'),
)
def _display_name(self):
return self.email
@computed_property
def system_wide_role(self):
"""For choosing the role string to show to the user; of all the roles in
the system-wide context, it shows the highest ranked one (if there are
multiple) or "No Access" if there are none.
"""
# FIXME: This method should be in `ggrc_basic_permissions`, since it
# depends on `Role` and `UserRole` objects
if self.email in getattr(settings, "BOOTSTRAP_ADMIN_USERS", []):
return u"Superuser"
ROLE_HIERARCHY = {
u'gGRC Admin': 0,
u'Editor': 1,
u'Reader': 2,
u'Creator': 3,
}
system_wide_roles = ROLE_HIERARCHY.keys()
unique_roles = set([
user_role.role.name
for user_role in self.user_roles
if user_role.role.name in system_wide_roles
])
if len(unique_roles) == 0:
return u"No Access"
else:
# -1 as default to make items not in this list appear on top
# and thus shown to the user
sorted_roles = sorted(unique_roles,
key=lambda x: ROLE_HIERARCHY.get(x, -1))
return sorted_roles[0]
# NOTE: dataset-extraction residue; original file: src/ggrc/models/person.py.
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: david@reciprocitylabs.com
# Maintained By: david@reciprocitylabs.com
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training functions for Gradient boosted decision trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from tensorflow.contrib import learn
from tensorflow.contrib import stateless
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import device_setter
# Key names for prediction dict.
ENSEMBLE_STAMP = "ensemble_stamp"
PREDICTIONS = "predictions"
PARTITION_IDS = "partition_ids"
NUM_LAYERS_ATTEMPTED = "num_layers"
NUM_TREES_ATTEMPTED = "num_trees"
NUM_USED_HANDLERS = "num_used_handlers"
USED_HANDLERS_MASK = "used_handlers_mask"
LEAF_INDEX = "leaf_index"

# Template used to derive per-column feature names when a multi-dimensional
# dense float feature is unstacked into individual columns.
_FEATURE_NAME_TEMPLATE = "%s_%d"

# Keys in Training state.
GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [
    "num_layer_examples", "num_layer_steps", "num_layers", "active_tree",
    "active_layer", "continue_centering", "bias_stats_accumulator",
    "steps_accumulator", "handlers"
])
def _get_column_by_index(tensor, indices):
  """Gathers the selected columns of a 2-D `tensor`.

  Args:
    tensor: A rank-2 `Tensor` of shape [rows, cols].
    indices: Column indices to gather, broadcastable against [rows, 1].

  Returns:
    A rank-2 `Tensor` of shape [rows, -1] with the gathered entries.
  """
  dims = array_ops.shape(tensor)
  num_rows = dims[0]
  num_cols = dims[1]
  # Offset of the first element of each row in the flattened tensor.
  row_starts = array_ops.reshape(
      math_ops.range(0, num_rows) * num_cols, [-1, 1])
  flat_indices = array_ops.reshape(row_starts + indices, [-1])
  flat_tensor = array_ops.reshape(tensor, [-1])
  gathered = array_ops.gather(flat_tensor, flat_indices)
  return array_ops.reshape(gathered, [num_rows, -1])
def _make_predictions_dict(stamp,
                           logits,
                           partition_ids,
                           ensemble_stats,
                           used_handlers,
                           leaf_index=None):
  """Assembles the standard prediction dictionary.

  Args:
    stamp: The ensemble stamp.
    logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1]. that
      contains predictions when no dropout was applied.
    partition_ids: A rank 1 `Tensor` with shape [batch_size].
    ensemble_stats: A TreeEnsembleStatsOp result tuple.
    used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
      boolean mask.
    leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]. that
      contains leaf id for each example prediction.

  Returns:
    A dict of predictions.
  """
  result = {
      ENSEMBLE_STAMP: stamp,
      PREDICTIONS: logits,
      PARTITION_IDS: partition_ids,
      NUM_LAYERS_ATTEMPTED: ensemble_stats.attempted_layers,
      NUM_TREES_ATTEMPTED: ensemble_stats.attempted_trees,
      NUM_USED_HANDLERS: used_handlers.num_used_handlers,
      USED_HANDLERS_MASK: used_handlers.used_handlers_mask,
  }
  # Leaf ids are only produced by the verbose prediction path.
  if leaf_index is not None:
    result[LEAF_INDEX] = leaf_index
  return result
class _OpRoundRobinStrategy(object):
"""Returns the next ps task index for placement via per-Op round-robin order.
This strategy works slightly better for the GBDT graph because of using
custom resources which vary significantly in compute cost.
"""
def __init__(self, ps_ops, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
"""
next_task = 0
self._next_task_per_op = {}
for op in ps_ops:
self._next_task_per_op[op] = next_task
next_task = (next_task + 1) % num_tasks if num_tasks else 0
self._num_tasks = num_tasks
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Returns the next
index, in the range `[offset, offset + num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
"""
if op.type not in self._next_task_per_op:
raise ValueError("Unknown op type '%s' for placement:" % op.type)
task = self._next_task_per_op[op.type]
self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks
if self._num_tasks else 0)
return task
def extract_features(features, feature_columns, use_core_columns):
  """Extracts columns from a dictionary of features.

  Args:
    features: `dict` of `Tensor` objects.
    feature_columns: A list of feature_columns.
    use_core_columns: Whether `feature_columns` are core `tf.feature_column`s
      rather than contrib feature columns.

  Returns:
    Eight values:
    - A list of all feature column names.
    - A list of dense floats.
    - A list of sparse float feature indices.
    - A list of sparse float feature values.
    - A list of sparse float feature shapes.
    - A list of sparse int feature indices.
    - A list of sparse int feature values.
    - A list of sparse int feature shapes.

  Raises:
    ValueError: if features is not valid.
  """
  if not features:
    raise ValueError("Features dictionary must be specified.")

  # Make a shallow copy of features to ensure downstream usage
  # is unaffected by modifications in the model function.
  features = copy.copy(features)
  if feature_columns:
    scope = "gbdt"
    with variable_scope.variable_scope(scope):
      feature_columns = list(feature_columns)
      transformed_features = collections.OrderedDict()
      for fc in feature_columns:
        # pylint: disable=protected-access
        if use_core_columns:
          # pylint: disable=protected-access
          tensor = fc_core._transform_features(features, [fc])[fc]
          transformed_features[fc.name] = tensor
        elif isinstance(fc, feature_column_lib._EmbeddingColumn):
          # pylint: enable=protected-access
          transformed_features[fc.name] = fc_core.input_layer(
              features, [fc], weight_collections=[scope])
        else:
          result = feature_column_ops.transform_features(features, [fc])
          if len(result) > 1:
            raise ValueError("Unexpected number of output features")
          transformed_features[fc.name] = result[list(result.keys())[0]]
    features = transformed_features

  dense_float_names = []
  dense_floats = []
  sparse_float_names = []
  sparse_float_indices = []
  sparse_float_values = []
  sparse_float_shapes = []
  sparse_int_names = []
  sparse_int_indices = []
  sparse_int_values = []
  sparse_int_shapes = []
  # Sort by key to give the columns a deterministic slot ordering.
  for key in sorted(features.keys()):
    tensor = features[key]
    # TODO(nponomareva): consider iterating over feature columns instead.
    if isinstance(tensor, tuple):
      # Weighted categorical feature: (ids SparseTensor, weights SparseTensor).
      # Fold the category id into the index so the value column holds weights.
      categorical_tensor = tensor[0]
      weight_tensor = tensor[1]
      shape = categorical_tensor.dense_shape
      indices = array_ops.concat([
          array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),
          array_ops.expand_dims(
              math_ops.to_int64(categorical_tensor.values), -1)
      ], 1)
      tensor = sparse_tensor.SparseTensor(
          indices=indices, values=weight_tensor.values, dense_shape=shape)

    if isinstance(tensor, sparse_tensor.SparseTensor):
      if tensor.values.dtype == dtypes.float32:
        sparse_float_names.append(key)
        sparse_float_indices.append(tensor.indices)
        sparse_float_values.append(tensor.values)
        sparse_float_shapes.append(tensor.dense_shape)
      elif tensor.values.dtype == dtypes.int64:
        sparse_int_names.append(key)
        sparse_int_indices.append(tensor.indices)
        sparse_int_values.append(tensor.values)
        sparse_int_shapes.append(tensor.dense_shape)
      else:
        raise ValueError("Unsupported sparse feature %s with dtype %s." %
                         (tensor.indices.name, tensor.dtype))
    else:
      if tensor.dtype == dtypes.float32:
        if len(tensor.shape) > 1 and tensor.shape[1] > 1:
          # Multi-dimensional dense floats are split into one column each,
          # named via _FEATURE_NAME_TEMPLATE.
          unstacked = array_ops.unstack(tensor, axis=1)
          for i in range(len(unstacked)):
            dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))
            dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))
        else:
          dense_float_names.append(key)
          dense_floats.append(tensor)
      else:
        raise ValueError("Unsupported dense feature %s with dtype %s." %
                         (tensor.name, tensor.dtype))
  # Feature columns are logically organized into incrementing slots starting
  # from dense floats, then sparse floats then sparse ints.
  fc_names = (dense_float_names + sparse_float_names + sparse_int_names)
  return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
          sparse_float_shapes, sparse_int_indices, sparse_int_values,
          sparse_int_shapes)
def _dropout_params(mode, ensemble_stats):
  """Decides whether dropout applies and which seed to use.

  Args:
    mode: Train/Eval/Infer
    ensemble_stats: A TreeEnsembleStatsOp result tuple.

  Returns:
    Whether to apply dropout and a dropout seed.
  """
  # Dropout only runs while training; the attempted-tree count doubles as a
  # deterministic seed so the same dropout can be reconstructed later.
  if mode != learn.ModeKeys.TRAIN:
    return False, -1
  return True, ensemble_stats.attempted_trees
class GradientBoostedDecisionTreeModel(object):
"""A GBDT model function."""
  def __init__(self,
               is_chief,
               num_ps_replicas,
               ensemble_handle,
               center_bias,
               examples_per_layer,
               learner_config,
               features,
               logits_dimension,
               loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
               feature_columns=None,
               use_core_columns=False,
               output_leaf_index=False,
               output_leaf_index_modes=None,
               num_quantiles=100):
    """Construct a new GradientBoostedDecisionTreeModel function.

    Args:
      is_chief: Whether to build the chief graph.
      num_ps_replicas: Number of parameter server replicas, can be 0.
      ensemble_handle: A handle to the ensemble variable.
      center_bias: Whether to center the bias before growing trees.
      examples_per_layer: Number of examples to accumulate before growing a tree
        layer. It can also be a function that computes the number of examples
        based on the depth of the layer that's being built.
      learner_config: A learner config.
      features: `dict` of `Tensor` objects.
      logits_dimension: An int, the dimension of logits.
      loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
      feature_columns: A list of feature columns.
      use_core_columns: A boolean specifying whether core feature columns are
        used.
      output_leaf_index: A boolean variable indicating whether to output leaf
        index into predictions dictionary.
      output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
        dictates when leaf indices will be outputted. By default, leaf indices
        are only outputted in INFER mode.
      num_quantiles: Number of quantiles to build for numeric feature values.

    Raises:
      ValueError: if inputs are not valid.
    """
    # Validate mandatory inputs early so misconfiguration fails fast.
    if ensemble_handle is None:
      raise ValueError("ensemble_handle must be specified.")

    if learner_config is None:
      raise ValueError("learner_config must be specified.")

    if learner_config.num_classes < 2:
      raise ValueError("Number of classes must be >=2")

    self._logits_dimension = logits_dimension
    self._is_chief = is_chief
    self._num_ps_replicas = num_ps_replicas
    self._ensemble_handle = ensemble_handle
    self._center_bias = center_bias
    self._examples_per_layer = examples_per_layer

    # Check loss reduction value.
    if (loss_reduction != losses.Reduction.SUM and
        loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
      raise ValueError(
          "Invalid loss reduction is provided: %s." % loss_reduction)
    self._loss_reduction = loss_reduction

    # Fill in the defaults.  NOTE: this mutates the caller-supplied
    # learner_config proto in place.
    if (learner_config.multi_class_strategy ==
        learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
      if logits_dimension == 1:
        learner_config.multi_class_strategy = (
            learner_pb2.LearnerConfig.TREE_PER_CLASS)
      else:
        learner_config.multi_class_strategy = (
            learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)

    # Per-example gradients/hessians are scalars for single-logit problems
    # and the tree-per-class strategy; otherwise vectors (diagonal hessian)
    # or a matrix (full hessian).
    if logits_dimension == 1 or learner_config.multi_class_strategy == (
        learner_pb2.LearnerConfig.TREE_PER_CLASS):
      self._gradient_shape = tensor_shape.scalar()
      self._hessian_shape = tensor_shape.scalar()
    else:
      if center_bias:
        raise ValueError("Center bias should be False for multiclass.")

      self._gradient_shape = tensor_shape.TensorShape([logits_dimension])
      if (learner_config.multi_class_strategy ==
          learner_pb2.LearnerConfig.FULL_HESSIAN):
        self._hessian_shape = tensor_shape.TensorShape(
            ([logits_dimension, logits_dimension]))
      else:
        # Diagonal hessian strategy.
        self._hessian_shape = tensor_shape.TensorShape(([logits_dimension]))
    if (learner_config.growing_mode ==
        learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):
      learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER

    if (learner_config.pruning_mode ==
        learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
      learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE

    if learner_config.constraints.max_tree_depth == 0:
      # Use 6 as the default maximum depth.
      learner_config.constraints.max_tree_depth = 6

    tuner = learner_config.learning_rate_tuner.WhichOneof("tuner")
    if not tuner:
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1

    self._learner_config = learner_config
    self._feature_columns = feature_columns
    self._learner_config_serialized = learner_config.SerializeToString()
    self._num_quantiles = num_quantiles
    self._max_tree_depth = variables.VariableV1(
        initial_value=self._learner_config.constraints.max_tree_depth)
    self._attempted_trees = variables.VariableV1(
        initial_value=array_ops.zeros([], dtypes.int64),
        trainable=False,
        name="attempted_trees")
    self._finalized_trees = variables.VariableV1(
        initial_value=array_ops.zeros([], dtypes.int64),
        trainable=False,
        name="finalized_trees")
    if not features:
      raise ValueError("Features dictionary must be specified.")
    # Split the input features into the per-kind tensor lists the split
    # handlers consume.
    (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
     sparse_float_shapes, sparse_int_indices,
     sparse_int_values, sparse_int_shapes) = extract_features(
         features, self._feature_columns, use_core_columns)
    logging.info("Active Feature Columns: " + str(fc_names))
    logging.info("Learner config: " + str(learner_config))
    self._fc_names = fc_names
    self._dense_floats = dense_floats
    self._sparse_float_indices = sparse_float_indices
    self._sparse_float_values = sparse_float_values
    self._sparse_float_shapes = sparse_float_shapes
    self._sparse_int_indices = sparse_int_indices
    self._sparse_int_values = sparse_int_values
    self._sparse_int_shapes = sparse_int_shapes
    # Binary classification under TREE_PER_CLASS needs only one logit column.
    self._reduce_dim = (
        self._learner_config.multi_class_strategy ==
        learner_pb2.LearnerConfig.TREE_PER_CLASS and
        learner_config.num_classes == 2)

    if output_leaf_index_modes is None:
      output_leaf_index_modes = [learn.ModeKeys.INFER]
    elif not all(
        mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,
                 learn.ModeKeys.INFER) for mode in output_leaf_index_modes):
      raise ValueError("output_leaf_index_modes should only contain ModeKeys.")

    self._output_leaf_index = output_leaf_index
    self._output_leaf_index_modes = output_leaf_index_modes
  def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
    """Runs prediction and returns a dictionary of the prediction results.

    Args:
      ensemble_handle: ensemble resource handle.
      ensemble_stamp: stamp of ensemble resource.
      mode: learn.ModeKeys.TRAIN or EVAL or INFER.

    Returns:
      a dictionary of prediction results -
      ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,
      NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.
    """
    ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
                                                      ensemble_stamp)
    # Total number of split handlers (one per feature column of each kind).
    num_handlers = (
        len(self._dense_floats) + len(self._sparse_float_shapes) + len(
            self._sparse_int_shapes))
    # Used during feature selection.
    used_handlers = model_ops.tree_ensemble_used_handlers(
        ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)

    # We don't need dropout info - we can always restore it based on the
    # seed.
    apply_dropout, seed = _dropout_params(mode, ensemble_stats)
    # Make sure ensemble stats run. This will check that the ensemble has
    # the right stamp.
    with ops.control_dependencies(ensemble_stats):
      leaf_index = None
      # The verbose op additionally reports per-example leaf ids; use it only
      # when the caller requested leaf indices for this mode.
      if self._output_leaf_index and mode in self._output_leaf_index_modes:
        predictions, _, leaf_index = (
            prediction_ops).gradient_trees_prediction_verbose(
                ensemble_handle,
                seed,
                self._dense_floats,
                self._sparse_float_indices,
                self._sparse_float_values,
                self._sparse_float_shapes,
                self._sparse_int_indices,
                self._sparse_int_values,
                self._sparse_int_shapes,
                learner_config=self._learner_config_serialized,
                apply_dropout=apply_dropout,
                apply_averaging=mode != learn.ModeKeys.TRAIN,
                use_locking=True,
                center_bias=self._center_bias,
                reduce_dim=self._reduce_dim)
      else:
        leaf_index = None
        predictions, _ = prediction_ops.gradient_trees_prediction(
            ensemble_handle,
            seed,
            self._dense_floats,
            self._sparse_float_indices,
            self._sparse_float_values,
            self._sparse_float_shapes,
            self._sparse_int_indices,
            self._sparse_int_values,
            self._sparse_int_shapes,
            learner_config=self._learner_config_serialized,
            apply_dropout=apply_dropout,
            apply_averaging=mode != learn.ModeKeys.TRAIN,
            use_locking=True,
            center_bias=self._center_bias,
            reduce_dim=self._reduce_dim)
      partition_ids = prediction_ops.gradient_trees_partition_examples(
          ensemble_handle,
          self._dense_floats,
          self._sparse_float_indices,
          self._sparse_float_values,
          self._sparse_float_shapes,
          self._sparse_int_indices,
          self._sparse_int_values,
          self._sparse_int_shapes,
          use_locking=True)

    return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,
                                  ensemble_stats, used_handlers, leaf_index)
  def predict(self, mode):
    """Returns predictions given the features and mode.

    Args:
      mode: Mode the graph is running in (train|predict|eval).

    Returns:
      A dict of predictions tensors.

    Raises:
      ValueError: if features is not valid.
    """

    # Use the current ensemble to predict on the current batch of input.
    # For faster prediction we check if the inputs are on the same device
    # as the model. If not, we create a copy of the model on the worker.
    input_deps = (
        self._dense_floats + self._sparse_float_indices +
        self._sparse_int_indices)
    if not input_deps:
      raise ValueError("No input tensors for prediction.")

    # Get most current model stamp.
    ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)

    # Determine if ensemble is colocated with the inputs.
    if self._ensemble_handle.device != input_deps[0].device:
      # Create a local ensemble and get its local stamp.
      with ops.name_scope("local_ensemble", "TreeEnsembleVariable") as name:
        local_ensemble_handle = (
            gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name))
        # The local copy starts with stamp -1 so the first stamp comparison
        # below always triggers a refresh from the parameter server.
        create_op = gen_model_ops.create_tree_ensemble_variable(
            local_ensemble_handle, stamp_token=-1, tree_ensemble_config="")
        with ops.control_dependencies([create_op]):
          local_stamp = model_ops.tree_ensemble_stamp_token(
              local_ensemble_handle)

      # Determine whether the local ensemble is stale and update it if needed.
      def _refresh_local_ensemble_fn():
        # Serialize the model from parameter server after reading the inputs.
        with ops.control_dependencies([input_deps[0]]):
          (ensemble_stamp, serialized_model) = (
              model_ops.tree_ensemble_serialize(self._ensemble_handle))

        # Update local ensemble with the serialized model from parameter server.
        with ops.control_dependencies([create_op]):
          return model_ops.tree_ensemble_deserialize(
              local_ensemble_handle,
              stamp_token=ensemble_stamp,
              tree_ensemble_config=serialized_model), ensemble_stamp

      refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(
          math_ops.not_equal(ensemble_stamp,
                             local_stamp), _refresh_local_ensemble_fn,
          lambda: (control_flow_ops.no_op(), ensemble_stamp))

      # Once updated, use the local model for prediction.
      with ops.control_dependencies([refresh_local_ensemble]):
        return self._predict_and_return_dict(local_ensemble_handle,
                                             ensemble_stamp, mode)
    else:
      # Use ensemble_handle directly, if colocated.
      with ops.device(self._ensemble_handle.device):
        return self._predict_and_return_dict(self._ensemble_handle,
                                             ensemble_stamp, mode)
def _get_class_id(self, predictions_dict):
# Handle different multiclass strategies.
if (self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
self._logits_dimension != 1):
# Choose the class for which the tree is built (one vs rest).
return math_ops.to_int32(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension)
return constant_op.constant(-1, dtype=dtypes.int32)
def update_stats(self, loss, predictions_dict):
"""Update the accumulators with stats from this batch.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
Returns:
Three values:
- An op that adds a new tree to the ensemble, and
- An op that increments the stamp but removes all the trees and resets
the handlers. This can be used to reset the state of the ensemble.
- A dict containing the training state.
Raises:
ValueError: if inputs are not valid.
"""
# Get the worker device from input dependencies.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
worker_device = input_deps[0].device
# Get tensors relevant for training and form the loss.
predictions = predictions_dict[PREDICTIONS]
partition_ids = predictions_dict[PARTITION_IDS]
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
gradients = gradients_impl.gradients(
loss,
predictions,
name="Gradients",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
strategy = self._learner_config.multi_class_strategy
class_id = self._get_class_id(predictions_dict)
# Handle different multiclass strategies.
if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:
# We build one vs rest trees.
if self._logits_dimension == 1:
# We have only 1 score, gradients is of shape [batch, 1].
hessians = gradients_impl.gradients(
gradients,
predictions,
name="Hessian",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
else:
hessian_list = self._diagonal_hessian(gradients, predictions)
# Assemble hessian list into a tensor.
hessians = array_ops.stack(hessian_list, axis=1)
# Use class id tensor to get the column with that index from gradients
# and hessians.
squeezed_gradients = array_ops.squeeze(
_get_column_by_index(gradients, class_id))
squeezed_hessians = array_ops.squeeze(
_get_column_by_index(hessians, class_id))
else:
# Other multiclass strategies.
if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:
hessian_list = self._full_hessian(gradients, predictions)
else:
# Diagonal hessian strategy.
hessian_list = self._diagonal_hessian(gradients, predictions)
squeezed_gradients = gradients
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_hessians = hessians
# Get the weights for each example for quantiles calculation,
weights = self._get_weights(self._hessian_shape, squeezed_hessians)
# Create all handlers ensuring resources are evenly allocated across PS.
fc_name_idx = 0
handlers = []
init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
l1_regularization = constant_op.constant(
self._learner_config.regularization.l1, dtypes.float32)
l2_regularization = constant_op.constant(
self._learner_config.regularization.l2, dtypes.float32)
tree_complexity_regularization = constant_op.constant(
self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(
self._learner_config.constraints.min_node_weight, dtypes.float32)
loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM
loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
weak_learner_type = constant_op.constant(
self._learner_config.weak_learner_type)
num_quantiles = self._num_quantiles
epsilon = 1.0 / num_quantiles
strategy_tensor = constant_op.constant(strategy)
with ops.device(self._get_replica_device_setter(worker_device)):
# Create handlers for dense float columns
for dense_float_column_idx in range(len(self._dense_floats)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.DenseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
dense_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
dense_float_column=self._dense_floats[dense_float_column_idx],
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type,
))
fc_name_idx += 1
# Create handlers for sparse float columns.
for sparse_float_column_idx in range(len(self._sparse_float_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.SparseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
sparse_float_column=sparse_tensor.SparseTensor(
self._sparse_float_indices[sparse_float_column_idx],
self._sparse_float_values[sparse_float_column_idx],
self._sparse_float_shapes[sparse_float_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create handlers for sparse int columns.
for sparse_int_column_idx in range(len(self._sparse_int_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
categorical_split_handler.EqualitySplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_int_column_idx),
sparse_int_column=sparse_tensor.SparseTensor(
self._sparse_int_indices[sparse_int_column_idx],
self._sparse_int_values[sparse_int_column_idx],
self._sparse_int_shapes[sparse_int_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type))
fc_name_idx += 1
# Create ensemble stats variables.
num_layer_examples = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_examples",
trainable=False)
num_layer_steps = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_steps",
trainable=False)
num_layers = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layers",
trainable=False)
active_tree = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_tree",
trainable=False)
active_layer = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_layer",
trainable=False)
# Variable that becomes false once bias centering is done.
continue_centering = variables.VariableV1(
initial_value=self._center_bias,
name="continue_centering",
trainable=False)
# Create bias stats accumulator.
bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
name="BiasAccumulator")
# Create steps accumulator.
steps_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar(),
name="StepsAccumulator")
# Create ensemble stats summaries.
summary.scalar("layer_stats/num_examples", num_layer_examples)
summary.scalar("layer_stats/num_steps", num_layer_steps)
summary.scalar("ensemble_stats/active_tree", active_tree)
summary.scalar("ensemble_stats/active_layer", active_layer)
# Update bias stats.
stats_update_ops = []
stats_update_ops.append(
control_flow_ops.cond(
continue_centering,
self._make_update_bias_stats_fn(
ensemble_stamp, predictions, gradients,
bias_stats_accumulator), control_flow_ops.no_op))
# Update handler stats.
handler_reads = collections.OrderedDict()
for handler in handlers:
handler_reads[handler] = handler.scheduled_reads()
handler_results = batch_ops_utils.run_handler_scheduled_ops(
handler_reads, ensemble_stamp, worker_device)
per_handler_updates = collections.OrderedDict()
# Two values per handler. First one is if the handler is active for the
# current layer. The second one is if the handler is going to be active
# for the next layer.
subsampling_type = self._learner_config.WhichOneof("feature_fraction")
if subsampling_type == "feature_fraction_per_level":
seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 1])
active_handlers_next_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed + 1, 1])
active_handlers = array_ops.stack(
[active_handlers_current_layer, active_handlers_next_layer], axis=1)
active_handlers = (
active_handlers < self._learner_config.feature_fraction_per_level)
elif subsampling_type == "feature_fraction_per_tree":
seed = predictions_dict[NUM_TREES_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 2])
active_handlers_current_layer = (
active_handlers_current_layer <
self._learner_config.feature_fraction_per_tree)
active_handlers = array_ops.stack(
[
active_handlers_current_layer,
array_ops.ones([len(handlers)], dtype=dtypes.bool)
],
axis=1)
else:
active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)
if self._learner_config.constraints.max_number_of_unique_feature_columns:
target = (
self._learner_config.constraints.max_number_of_unique_feature_columns)
def _feature_selection_active_handlers():
# The active list for current and the next iteration.
used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK],
[-1, 1])
used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)
return math_ops.logical_and(used_handlers, active_handlers)
active_handlers = (
control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target,
_feature_selection_active_handlers,
lambda: active_handlers))
# Prepare empty gradients and hessians when handlers are not ready.
empty_hess_shape = [1] + self._hessian_shape.as_list()
empty_grad_shape = [1] + self._gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
active_handlers = array_ops.unstack(active_handlers, axis=0)
for handler_idx in range(len(handlers)):
handler = handlers[handler_idx]
is_active = active_handlers[handler_idx]
updates, scheduled_updates = handler.update_stats(
ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,
empty_gradients, empty_hessians, weights, is_active,
handler_results[handler])
stats_update_ops.append(updates)
per_handler_updates[handler] = scheduled_updates
update_results = batch_ops_utils.run_handler_scheduled_ops(
per_handler_updates, ensemble_stamp, worker_device)
for update in update_results.values():
stats_update_ops += update
training_state = GBDTTrainingState(
num_layer_examples=num_layer_examples,
num_layer_steps=num_layer_steps,
num_layers=num_layers,
active_tree=active_tree,
active_layer=active_layer,
continue_centering=continue_centering,
bias_stats_accumulator=bias_stats_accumulator,
steps_accumulator=steps_accumulator,
handlers=handlers)
reset_op = control_flow_ops.no_op()
if self._is_chief:
# Advance the ensemble stamp to throw away staggered workers.
stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
next_stamp_token = stamp_token + 1
reset_ops = []
for handler in handlers:
reset_ops.append(handler.reset(stamp_token, next_stamp_token))
if self._center_bias:
reset_ops.append(
bias_stats_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(self._finalized_trees.assign(0).op)
reset_ops.append(self._attempted_trees.assign(0).op)
reset_ops.append(
model_ops.tree_ensemble_deserialize(
self._ensemble_handle,
stamp_token=next_stamp_token,
tree_ensemble_config="",
name="reset_gbdt"))
reset_op = control_flow_ops.group([reset_ops])
return stats_update_ops, reset_op, training_state
  def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
                                                       training_state):
    """Increments number of visited examples and grows the ensemble.

    If the number of visited examples reaches the target examples_per_layer,
    ensemble is updated.

    Args:
      predictions_dict: Dictionary of Rank 2 `Tensor` representing information
        about predictions per example.
      training_state: `GBDTTrainingState` returned by update_stats.

    Returns:
      An op that updates the counters and potentially grows the ensemble.
    """
    batch_size = math_ops.cast(
        array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
    ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
    # Accumulate a step after updating stats.
    steps_accumulator = training_state.steps_accumulator
    num_layer_examples = training_state.num_layer_examples
    num_layer_steps = training_state.num_layer_steps
    active_layer = training_state.active_layer
    # Record one step covering `batch_size` examples under the current stamp.
    add_step_op = steps_accumulator.add(
        ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
    # After adding the step, decide if further processing is needed.
    ensemble_update_ops = [add_step_op]
    class_id = self._get_class_id(predictions_dict)
    with ops.control_dependencies([add_step_op]):
      # Only the chief worker decides whether the ensemble grows.
      if self._is_chief:
        dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]
        # Get accumulated steps and examples for the current layer.
        _, _, _, _, acc_examples, acc_steps = (
            steps_accumulator.serialize())
        acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
        acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
        ensemble_update_ops.append(
            num_layer_examples.assign(acc_examples))
        ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
        # Determine whether we need to update tree ensemble.
        examples_per_layer = self._examples_per_layer
        if callable(examples_per_layer):
          # examples_per_layer may be a function of the active layer depth.
          examples_per_layer = examples_per_layer(active_layer)
        # Grow (or center bias of) the ensemble only once enough examples
        # have been accumulated for the current layer.
        ensemble_update_ops.append(
            control_flow_ops.cond(
                acc_examples >= examples_per_layer,
                self.make_update_ensemble_fn(ensemble_stamp, training_state,
                                             dropout_seed, class_id),
                control_flow_ops.no_op))
    # Note, the loss is calculated from the prediction considering dropouts, so
    # that the value might look staggering over steps when the dropout ratio is
    # high. eval_loss might be referred instead in the aspect of convergence.
    return control_flow_ops.group(*ensemble_update_ops)
  def make_update_ensemble_fn(self, ensemble_stamp, training_state,
                              dropout_seed, class_id):
    """A method to create the function which updates the tree ensemble.

    Args:
      ensemble_stamp: Stamp token the ensemble is expected to carry; the
        update advances it by one.
      training_state: `GBDTTrainingState` returned by update_stats.
      dropout_seed: Seed forwarded to grow_tree_ensemble for dropout.
      class_id: Class id passed to the handlers when making splits.

    Returns:
      A no-argument function building the ops that either center the bias or
      grow the ensemble, returning the grouped update op.

    Raises:
      ValueError: if the configured learning rate tuner is not supported.
    """
    # Determine learning rate.
    learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
        "tuner")
    if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
      tuner = getattr(self._learner_config.learning_rate_tuner,
                      learning_rate_tuner)
      learning_rate = tuner.learning_rate
    else:
      # TODO(nponomareva, soroush) do the line search.
      raise ValueError("Line search learning rate is not yet supported.")

    def _update_ensemble():
      """A method to update the tree ensemble."""
      # Get next stamp token.
      next_ensemble_stamp = ensemble_stamp + 1
      # Finalize bias stats.
      _, _, _, bias_grads, bias_hess = (
          training_state.bias_stats_accumulator.flush(ensemble_stamp,
                                                      next_ensemble_stamp))
      # Finalize handler splits.
      are_splits_ready_list = []
      partition_ids_list = []
      gains_list = []
      split_info_list = []
      for handler in training_state.handlers:
        (are_splits_ready,
         partition_ids, gains, split_info) = handler.make_splits(
             ensemble_stamp, next_ensemble_stamp, class_id)
        are_splits_ready_list.append(are_splits_ready)
        partition_ids_list.append(partition_ids)
        gains_list.append(gains)
        split_info_list.append(split_info)
      # Stack all the inputs to one tensor per type.
      # This is a workaround for the slowness of graph building in tf.cond.
      # See (b/36554864).
      # split_sizes remembers each handler's contribution so the concatenated
      # tensors can be split back apart inside _grow_ensemble_ready_fn.
      split_sizes = array_ops.reshape(
          array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
      partition_ids = array_ops.concat(partition_ids_list, axis=0)
      gains = array_ops.concat(gains_list, axis=0)
      split_infos = array_ops.concat(split_info_list, axis=0)
      # Determine if all splits are ready.
      are_all_splits_ready = math_ops.reduce_all(
          array_ops.stack(
              are_splits_ready_list, axis=0, name="stack_handler_readiness"))

      # Define bias centering update operation.
      def _center_bias_fn():
        # Center tree ensemble bias.
        # Guard against division by zero when the hessian is non-positive.
        delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,
                                        array_ops.zeros_like(bias_grads))
        center_bias = training_ops.center_tree_ensemble_bias(
            tree_ensemble_handle=self._ensemble_handle,
            stamp_token=ensemble_stamp,
            next_stamp_token=next_ensemble_stamp,
            delta_updates=delta_updates,
            learner_config=self._learner_config_serialized)
        # center_bias is a boolean op result deciding whether centering
        # continues on the next round.
        return training_state.continue_centering.assign(center_bias)

      # Define ensemble growing operations.
      def _grow_ensemble_ready_fn():
        # Grow the ensemble given the current candidates.
        sizes = array_ops.unstack(split_sizes)
        partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
        # When using the oblivious decision tree as weak learner, it produces
        # one gain and one split per handler and not number of partitions.
        if self._learner_config.weak_learner_type == (
            learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
          sizes = len(training_state.handlers)
        gains_list = list(array_ops.split(gains, sizes, axis=0))
        split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
        return training_ops.grow_tree_ensemble(
            tree_ensemble_handle=self._ensemble_handle,
            stamp_token=ensemble_stamp,
            next_stamp_token=next_ensemble_stamp,
            learning_rate=learning_rate,
            partition_ids=partition_ids_list,
            gains=gains_list,
            splits=split_info_list,
            learner_config=self._learner_config_serialized,
            dropout_seed=dropout_seed,
            center_bias=self._center_bias,
            max_tree_depth=self._max_tree_depth,
            weak_learner_type=self._learner_config.weak_learner_type)

      def _grow_ensemble_not_ready_fn():
        # Don't grow the ensemble, just update the stamp.
        return training_ops.grow_tree_ensemble(
            tree_ensemble_handle=self._ensemble_handle,
            stamp_token=ensemble_stamp,
            next_stamp_token=next_ensemble_stamp,
            learning_rate=0,
            partition_ids=[],
            gains=[],
            splits=[],
            learner_config=self._learner_config_serialized,
            dropout_seed=dropout_seed,
            center_bias=self._center_bias,
            max_tree_depth=self._max_tree_depth,
            weak_learner_type=self._learner_config.weak_learner_type)

      def _grow_ensemble_fn():
        # Conditionally grow an ensemble depending on whether the splits
        # from all the handlers are ready.
        return control_flow_ops.cond(are_all_splits_ready,
                                     _grow_ensemble_ready_fn,
                                     _grow_ensemble_not_ready_fn)

      # Update ensemble.
      update_ops = [are_all_splits_ready]
      if self._center_bias:
        update_model = control_flow_ops.cond(training_state.continue_centering,
                                             _center_bias_fn, _grow_ensemble_fn)
      else:
        update_model = _grow_ensemble_fn()
      update_ops.append(update_model)

      # Update ensemble stats.
      with ops.control_dependencies([update_model]):
        stats = training_ops.tree_ensemble_stats(
            self._ensemble_handle, stamp_token=next_ensemble_stamp)
        update_ops.append(self._finalized_trees.assign(stats.num_trees))
        update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
        update_ops.append(training_state.num_layers.assign(stats.num_layers))
        update_ops.append(training_state.active_tree.assign(stats.active_tree))
        update_ops.append(
            training_state.active_layer.assign(stats.active_layer))
      # Flush step stats.
      update_ops.extend(
          training_state.steps_accumulator.flush(ensemble_stamp,
                                                 next_ensemble_stamp))
      return control_flow_ops.group(*update_ops, name="update_ensemble")

    return _update_ensemble
def get_number_of_trees_tensor(self):
return self._finalized_trees, self._attempted_trees
def get_max_tree_depth(self):
return self._max_tree_depth
def train(self, loss, predictions_dict, labels):
"""Updates the accumalator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
Returns:
An op that adds a new tree to the ensemble.
Raises:
ValueError: if inputs are not valid.
"""
del labels # unused; kept for backward compatibility.
update_op, _, training_state = self.update_stats(loss, predictions_dict)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
def _get_weights(self, hessian_shape, hessians):
"""Derives weights to be used based on hessians and multiclass strategy."""
if hessian_shape == tensor_shape.scalar():
# This is tree per class.
weights = hessians
elif len(hessian_shape.dims) == 1:
# This is diagonal hessian.
weights = math_ops.reduce_sum(hessians, axis=1)
else:
# This is full hessian.
weights = math_ops.trace(hessians)
return weights
def _full_hessian(self, grads, predictions):
"""Prepares hessians for full-hessian multiclass strategy."""
# Because of
# https://github.com/tensorflow/tensorflow/issues/675, we can't just
# compute the full hessian with a single call to gradients, but instead
# must compute it row-by-row.
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
# If current row is i, K is number of classes,each row returns a tensor of
# size batch_size x K representing for each example dx_i dx_1, dx_i dx_2
# etc dx_i dx_K
hessian_row = gradients_impl.gradients(
gradients_list[row],
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
def _diagonal_hessian(self, grads, predictions):
"""Prepares hessians for diagonal-hessian multiclass mode."""
diag_hessian_list = []
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
for row, row_grads in enumerate(gradients_list):
# If current row is i, K is number of classes,each row returns a tensor of
# size batch_size x K representing for each example dx_i dx_1, dx_1 dx_2
# etc dx_i dx_K
hessian_row = gradients_impl.gradients(
row_grads,
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
# Get dx_i^2 for the whole batch.
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
def _get_replica_device_setter(self, worker_device):
"""Creates a replica device setter."""
ps_tasks = self._num_ps_replicas
ps_ops = [
"Variable",
"VariableV2",
"DecisionTreeEnsembleResourceHandleOp",
"StatsAccumulatorScalarResourceHandleOp",
"StatsAccumulatorTensorResourceHandleOp",
]
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_tasks,
merge_devices=True,
ps_ops=ps_ops,
ps_strategy=ps_strategy)
def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients,
bias_stats_accumulator):
"""A method to create the function which updates the bias stats."""
def _update_bias_stats():
"""A method to update the bias stats."""
# Get reduced gradients and hessians.
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(
grads_sum,
predictions,
name="Hessians",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
# Accumulate gradients and hessians.
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros(
[self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(
ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name="update_bias_stats")
return _update_bias_stats
| Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py | 55,581 | A GBDT model function.
Returns the next ps task index for placement via per-Op round-robin order.
This strategy works slightly better for the GBDT graph because of using
custom resources which vary significantly in compute cost.
Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Returns the next
index, in the range `[offset, offset + num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
Construct a new GradientBoostedDecisionTreeModel function.
Args:
is_chief: Whether to build the chief graph.
num_ps_replicas: Number of parameter server replicas, can be 0.
ensemble_handle: A handle to the ensemble variable.
center_bias: Whether to center the bias before growing trees.
examples_per_layer: Number of examples to accumulate before growing a tree
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
learner_config: A learner config.
features: `dict` of `Tensor` objects.
logits_dimension: An int, the dimension of logits.
loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
feature_columns: A list of feature columns.
use_core_columns: A boolean specifying whether core feature columns are
used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
dictates when leaf indices will be outputted. By default, leaf indices
are only outputted in INFER mode.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: if inputs are not valid.
Prepares hessians for diagonal-hessian multiclass mode.
Returns parameters relevant for dropout.
Args:
mode: Train/Eval/Infer
ensemble_stats: A TreeEnsembleStatsOp result tuple.
Returns:
Whether to apply dropout and a dropout seed.
Prepares hessians for full-hessian multiclass strategy.
Returns columns from a 2-D tensor by index.
Creates a replica device setter.
Derives weights to be used based on hessians and multiclass strategy.
Returns predictions for the given logits and n_classes.
Args:
stamp: The ensemble stamp.
logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1]. that
contains predictions when no dropout was applied.
partition_ids: A rank 1 `Tensor` with shape [batch_size].
ensemble_stats: A TreeEnsembleStatsOp result tuple.
used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
boolean mask.
leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]. that
contains leaf id for each example prediction.
Returns:
A dict of predictions.
A method to create the function which updates the bias stats.
Runs prediction and returns a dictionary of the prediction results.
Args:
ensemble_handle: ensemble resource handle.
ensemble_stamp: stamp of ensemble resource.
mode: learn.ModeKeys.TRAIN or EVAL or INFER.
Returns:
a dictionary of prediction results -
ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,
NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.
A method to update the bias stats.
A method to update the tree ensemble.
Extracts columns from a dictionary of features.
Args:
features: `dict` of `Tensor` objects.
feature_columns: A list of feature_columns.
Returns:
Seven values:
- A list of all feature column names.
- A list of dense floats.
- A list of sparse float feature indices.
- A list of sparse float feature values.
- A list of sparse float feature shapes.
- A list of sparse int feature indices.
- A list of sparse int feature values.
- A list of sparse int feature shapes.
Raises:
ValueError: if features is not valid.
Increments number of visited examples and grows the ensemble.
If the number of visited examples reaches the target examples_per_layer,
ensemble is updated.
Args:
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
training_state: `dict` returned by update_stats.
Returns:
An op that updates the counters and potentially grows the ensemble.
A method to create the function which updates the tree ensemble.
Returns predictions given the features and mode.
Args:
mode: Mode the graph is running in (train|predict|eval).
Returns:
A dict of predictions tensors.
Raises:
ValueError: if features is not valid.
Updates the accumulator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
Returns:
An op that adds a new tree to the ensemble.
Raises:
ValueError: if inputs are not valid.
Update the accumulators with stats from this batch.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
Returns:
Three values:
- An op that adds a new tree to the ensemble, and
- An op that increments the stamp but removes all the trees and resets
the handlers. This can be used to reset the state of the ensemble.
- A dict containing the training state.
Raises:
ValueError: if inputs are not valid.
Training functions for Gradient boosted decision trees.
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Key names for prediction dict. Keys in Training state. Make a shallow copy of features to ensure downstream usage is unaffected by modifications in the model function. pylint: disable=protected-access pylint: disable=protected-access pylint: enable=protected-access TODO(nponomareva): consider iterating over feature columns instead. Weighted categorical feature. Feature columns are logically organized into incrementing slots starting from dense floats, then sparse floats then sparse ints. Do dropout only during training. Check loss reduction value. Fill in the defaults. Diagonal hessian strategy. Use 6 as the default maximum depth. Used during feature selection. We don't need dropout info - we can always restore it based on the seed. Make sure ensemble stats run. This will check that the ensemble has the right stamp. Use the current ensemble to predict on the current batch of input. For faster prediction we check if the inputs are on the same device as the model. If not, we create a copy of the model on the worker. Get most current model stamp. Determine if ensemble is colocated with the inputs. Create a local ensemble and get its local stamp. Determine whether the local ensemble is stale and update it if needed. Serialize the model from parameter server after reading the inputs. 
Update local ensemble with the serialized model from parameter server. Once updated, use the local model for prediction. Use ensemble_handle directly, if colocated. Handle different multiclass strategies. Choose the class for which the tree is built (one vs rest). Get the worker device from input dependencies. Get tensors relevant for training and form the loss. Handle different multiclass strategies. We build one vs rest trees. We have only 1 score, gradients is of shape [batch, 1]. Assemble hessian list into a tensor. Use class id tensor to get the column with that index from gradients and hessians. Other multiclass strategies. Diagonal hessian strategy. Get the weights for each example for quantiles calculation, Create all handlers ensuring resources are evenly allocated across PS. Create handlers for dense float columns Create handlers for sparse float columns. Create handlers for sparse int columns. Create ensemble stats variables. Variable that becomes false once bias centering is done. Create bias stats accumulator. Create steps accumulator. Create ensemble stats summaries. Update bias stats. Update handler stats. Two values per handler. First one is if the handler is active for the current layer. The second one is if the handler is going to be active for the next layer. The active list for current and the next iteration. Prepare empty gradients and hessians when handlers are not ready. Advance the ensemble stamp to throw away staggered workers. Accumulate a step after updating stats. After adding the step, decide if further processing is needed. Get accumulated steps and examples for the current layer. Determine whether we need to update tree ensemble. Note, the loss is calculated from the prediction considering dropouts, so that the value might look staggering over steps when the dropout ratio is high. eval_loss might be referred instead in the aspect of convergence. Determine learning rate. TODO(nponomareva, soroush) do the line search. 
Get next stamp token. Finalize bias stats. Finalize handler splits. Stack all the inputs to one tensor per type. This is a workaround for the slowness of graph building in tf.cond. See (b/36554864). Determine if all splits are ready. Define bias centering update operation. Center tree ensemble bias. Define ensemble growing operations. Grow the ensemble given the current candidates. When using the oblivious decision tree as weak learner, it produces one gain and one split per handler and not number of partitions. Don't grow the ensemble, just update the stamp. Conditionally grow an ensemble depending on whether the splits from all the handlers are ready. Update ensemble. Update ensemble stats. Flush step stats. unused; kept for backward compatibility. This is tree per class. This is diagonal hessian. This is full hessian. Because of https://github.com/tensorflow/tensorflow/issues/675, we can't just compute the full hessian with a single call to gradients, but instead must compute it row-by-row. If current row is i, K is number of classes,each row returns a tensor of size batch_size x K representing for each example dx_i dx_1, dx_i dx_2 etc dx_i dx_K hessian_row is of dimension 1, batch_size, K, => trim first dimension to get batch_size x K If current row is i, K is number of classes,each row returns a tensor of size batch_size x K representing for each example dx_i dx_1, dx_1 dx_2 etc dx_i dx_K hessian_row is of dimension 1, batch_size, K, => trim first dimension to get batch_size x K Get dx_i^2 for the whole batch. Get reduced gradients and hessians. Accumulate gradients and hessians. | 11,309 | en | 0.788245 |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateIndirectPartnerAccountResponse(SdkResponse):
    """Response model for the update-indirect-partner-account API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'transfer_id': 'str'
    }

    attribute_map = {
        'transfer_id': 'transfer_id'
    }

    def __init__(self, transfer_id=None):
        """UpdateIndirectPartnerAccountResponse - a model defined in huaweicloud sdk"""
        super().__init__()

        self._transfer_id = None
        self.discriminator = None

        if transfer_id is not None:
            self.transfer_id = transfer_id

    @property
    def transfer_id(self):
        """Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.

        Transaction ID; returned only on a successful response.

        :return: The transfer_id of this UpdateIndirectPartnerAccountResponse.
        :rtype: str
        """
        return self._transfer_id

    @transfer_id.setter
    def transfer_id(self, transfer_id):
        """Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.

        Transaction ID; returned only on a successful response.

        :param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.
        :type: str
        """
        self._transfer_id = transfer_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif name in self.sensitive_list:
                result[name] = "****"
            else:
                result[name] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, UpdateIndirectPartnerAccountResponse) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py | 3,109 | Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
UpdateIndirectPartnerAccountResponse - a model defined in huaweicloud sdk
Returns true if both objects are not equal
For `print` and `pprint`
Returns the model properties as a dict
Returns the string representation of the model
Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.
事务流水ID,只有成功响应才会返回。
:return: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:rtype: str
Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.
事务流水ID,只有成功响应才会返回。
:param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:type: str
coding: utf-8 | 868 | en | 0.504552 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# invenio-subjects-mesh is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Version information for invenio-subjects-mesh.
This file is imported by ``invenio_subjects_mesh.__init__``,
and parsed by ``setup.py``.
"""
__version__ = '2021.7.13'
| invenio_subjects_mesh/version.py | 404 | Version information for invenio-subjects-mesh.
This file is imported by ``invenio_subjects_mesh.__init__``,
and parsed by ``setup.py``.
-*- coding: utf-8 -*- Copyright (C) 2021 Northwestern University. invenio-subjects-mesh is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. | 356 | en | 0.771087 |
# ---------------------------------------------------------------------
# Segment handlers
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
# NOC modules
from noc.sa.models.managedobject import ManagedObject
from noc.fm.models.activealarm import ActiveAlarm
logger = logging.getLogger(__name__)
def set_segment_redundancy(alarm):
    """
    Set lost_redundancy to segment when redundant object is down
    :param alarm:
    :return:
    """
    if alarm.root:
        # A root-cause alarm already drives the segment state change.
        return
    managed_object = alarm.managed_object
    segment = managed_object.segment
    if not segment.is_redundant or segment.lost_redundancy:
        # Nothing to do: segment is not redundant, or redundancy already lost.
        return
    uplinks = managed_object.data.uplinks
    if len(uplinks) > 1:
        logger.info("[%s] Redundancy lost for %s", alarm.id, segment.name)
        segment.set_lost_redundancy(True)
def check_segment_redundancy(alarm):
    """
    Reset lost_redundancy from segment when all redundant objects
    are up
    :param alarm:
    :return:
    """
    managed_object = alarm.managed_object
    segment = managed_object.segment
    if not segment.is_redundant or not segment.lost_redundancy:
        # Only relevant for redundant segments currently marked as degraded.
        return
    uplinks = managed_object.data.uplinks
    if len(uplinks) < 2:
        return
    # Objects belonging to the segment.
    member_ids = list(segment.managed_objects.values_list("id", flat=True))
    # Segment members (other than this one) that still have active alarms.
    cursor = ActiveAlarm._get_collection().find(
        {"managed_object": {"$in": member_ids}}, {"_id": 0, "managed_object": 1}
    )
    alarmed_ids = [
        doc["managed_object"] for doc in cursor
        if doc["managed_object"] != managed_object.id
    ]
    alarmed_uplinks = ManagedObject.uplinks_for_objects(alarmed_ids)
    if not any(links for links in alarmed_uplinks.values() if len(links) > 1):
        logger.info("[%s] Redundancy recovered for %s", alarm.id, segment.name)
        segment.set_lost_redundancy(False)
| fm/handlers/alarm/segment.py | 1,865 | Reset lost_redundancy from segment when all redundant objects
are up
:param alarm:
:return:
Set lost_redundancy to segment when redundant object is down
:param alarm:
:return:
--------------------------------------------------------------------- Segment handlers --------------------------------------------------------------------- Copyright (C) 2007-2020 The NOC Project See LICENSE for details --------------------------------------------------------------------- Python modules NOC modules Already changed by root cause | 525 | en | 0.452214 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spectrogram decomposition
=========================
.. autosummary::
:toctree: generated/
decompose
hpss
nn_filter
"""
import numpy as np
import scipy.sparse
from scipy.ndimage import median_filter
import sklearn.decomposition
from . import core
from . import cache
from . import segment
from . import util
from .util.exceptions import ParameterError
__all__ = ['decompose', 'hpss', 'nn_filter']
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
    """Decompose a feature matrix.

    Given a spectrogram `S`, produce a decomposition into `components`
    and `activations` such that `S ~= components.dot(activations)`.

    By default, this is done with non-negative matrix factorization (NMF),
    but any `sklearn.decomposition`-type object will work.

    Parameters
    ----------
    S : np.ndarray [shape=(n_features, n_samples), dtype=float]
        The input feature matrix (e.g., magnitude spectrogram)

    n_components : int > 0 [scalar] or None
        number of desired components

        if None, then `n_features` components are used

    transformer : None or object
        If None, use `sklearn.decomposition.NMF`

        Otherwise, any object with a similar interface to NMF should work.
        `transformer` must follow the scikit-learn convention, where
        input data is `(n_samples, n_features)`.

        `transformer.fit_transform()` will be run on `S.T` (not `S`),
        the return value of which is stored (transposed) as `activations`

        The components will be retrieved as `transformer.components_.T`

        `S ~= np.dot(activations, transformer.components_).T`

        or equivalently:
        `S ~= np.dot(transformer.components_.T, activations.T)`

    sort : bool
        If `True`, components are sorted by ascending peak frequency.

        .. note:: If used with `transformer`, sorting is applied to copies
            of the decomposition parameters, and not to `transformer`'s
            internal parameters.

    fit : bool
        If `True`, components are estimated from the input ``S``.
        If `False`, components are assumed to be pre-computed and stored
        in ``transformer``, and are not changed.

    kwargs : Additional keyword arguments to the default transformer
        `sklearn.decomposition.NMF`

    Returns
    -------
    components: np.ndarray [shape=(n_features, n_components)]
        matrix of components (basis elements).

    activations: np.ndarray [shape=(n_components, n_samples)]
        transformed matrix/activation matrix

    Raises
    ------
    ParameterError
        if `fit` is False and no `transformer` object is provided.

    See Also
    --------
    sklearn.decomposition : SciKit-Learn matrix decomposition modules
    """
    if transformer is None:
        if fit is False:
            raise ParameterError('fit must be True if transformer is None')

        # NMF already defaults to n_features components when
        # n_components is None, so it can be passed through directly.
        # (A previous revision re-assigned n_components afterwards, but
        # the value was never read again — dead code, removed.)
        transformer = sklearn.decomposition.NMF(n_components=n_components,
                                                **kwargs)

    # scikit-learn works in (n_samples, n_features) orientation, so fit
    # on S.T and transpose back to (n_components, n_samples).
    if fit:
        activations = transformer.fit_transform(S.T).T
    else:
        activations = transformer.transform(S.T).T

    components = transformer.components_.T

    if sort:
        # Sort on copies only; transformer internals are left untouched.
        components, idx = util.axis_sort(components, index=True)
        activations = activations[idx]

    return components, activations
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
    """Median-filtering harmonic percussive source separation (HPSS).

    With ``margin == 1.0`` the input spectrogram is decomposed as
    ``S = H + P``, where `H` holds the harmonic components and `P` the
    percussive components.  With ``margin > 1.0`` a residual
    ``R = S - (H + P)`` is excluded from both.

    Based on the median-filtering method of Fitzgerald (DAFX 2010) and
    the margin extension of Driedger, Mueller, and Disch (ISMIR 2014).

    Parameters
    ----------
    S : np.ndarray [shape=(d, n)]
        input spectrogram. May be real (magnitude) or complex.

    kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
        kernel size(s) for the median filters; a scalar applies to both,
        a tuple gives the harmonic width first and percussive second.

    power : float > 0 [scalar]
        Exponent for the Wiener filter when constructing soft mask matrices.

    mask : bool
        If True, return the masking matrices instead of components.
        Components can be recovered as `S * mask_H` or `S * mask_P`.

    margin : float or tuple (margin_harmonic, margin_percussive)
        margin size(s) for the masks; a scalar applies to both, a tuple
        gives the harmonic margin first and percussive second.

    Returns
    -------
    harmonic : np.ndarray [shape=(d, n)]
        harmonic component (or mask)
    percussive : np.ndarray [shape=(d, n)]
        percussive component (or mask)

    Raises
    ------
    ParameterError
        if either margin is below 1.0

    See Also
    --------
    util.softmask

    Notes
    -----
    This function caches at level 30.
    """
    # Operate on magnitudes; keep the phase so complex components can
    # be reconstructed on return.
    if np.iscomplexobj(S):
        S, phase = core.magphase(S)
    else:
        phase = 1

    # Expand scalar parameters into (harmonic, percussive) pairs.
    if np.isscalar(kernel_size):
        win_harm = win_perc = kernel_size
    else:
        win_harm = kernel_size[0]
        win_perc = kernel_size[1]

    if np.isscalar(margin):
        margin_harm = margin_perc = margin
    else:
        margin_harm = margin[0]
        margin_perc = margin[1]

    # margin minimum is 1.0
    if margin_harm < 1 or margin_perc < 1:
        raise ParameterError("Margins must be >= 1.0. "
                             "A typical range is between 1 and 10.")

    # Median-filter horizontally (time) for the harmonic estimate and
    # vertically (frequency) for the percussive estimate.
    # Pre-allocation here preserves the memory layout of S.
    med_harm = np.empty_like(S)
    med_harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')

    med_perc = np.empty_like(S)
    med_perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')

    zeros_split = (margin_harm == 1 and margin_perc == 1)

    harm_mask = util.softmask(med_harm, med_perc * margin_harm,
                              power=power,
                              split_zeros=zeros_split)
    perc_mask = util.softmask(med_perc, med_harm * margin_perc,
                              power=power,
                              split_zeros=zeros_split)

    if mask:
        return harm_mask, perc_mask

    return ((S * harm_mask) * phase, (S * perc_mask) * phase)
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
    '''Filtering by nearest-neighbors.

    Each data point (e.g., spectrogram column) is replaced by
    aggregating its nearest neighbors in feature space, which is useful
    for de-noising a spectrogram or feature matrix.

    Supplying a weighted recurrence matrix with `aggregate=np.average`
    recovers non-local means de-noising (Buades, Coll & Morel, CVPR
    2005); `aggregate=np.median` gives the sparse de-noising of
    REPET-SIM (Rafii & Pardo, ISMIR 2012).

    Parameters
    ----------
    S : np.ndarray
        The input data (spectrogram) to filter

    rec : (optional) scipy.sparse.spmatrix or np.ndarray
        Optionally, a pre-computed nearest-neighbor matrix
        as provided by `librosa.segment.recurrence_matrix`

    aggregate : function
        aggregation function (default: `np.mean`)

        If `aggregate=np.average`, a weighted average is computed
        according to the (per-row) weights in `rec`; all other
        aggregation functions treat neighbors equally.

    axis : int
        The axis along which to filter (by default, columns)

    kwargs
        Additional keyword arguments provided to
        `librosa.segment.recurrence_matrix` if `rec` is not provided

    Returns
    -------
    S_filtered : np.ndarray
        The filtered data

    Raises
    ------
    ParameterError
        if `rec` is provided and its shape is incompatible with `S`.

    See also
    --------
    decompose
    hpss
    librosa.segment.recurrence_matrix

    Notes
    -----
    This function caches at level 30.
    '''
    aggregate = np.mean if aggregate is None else aggregate

    if rec is None:
        # Build a sparse recurrence matrix on demand.
        opts = dict(kwargs)
        opts['sparse'] = True
        rec = segment.recurrence_matrix(S, axis=axis, **opts)
    elif not scipy.sparse.issparse(rec):
        rec = scipy.sparse.csr_matrix(rec)

    # The recurrence matrix must be square and match the filtered axis.
    if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
        raise ParameterError('Invalid self-similarity matrix shape '
                             'rec.shape={} for S.shape={}'.format(rec.shape,
                                                                  S.shape))

    # Bring the filtering axis to the front, filter, then restore the
    # original axis order.
    permuted = S.swapaxes(0, axis)
    filtered = __nn_filter_helper(rec.data, rec.indices, rec.indptr,
                                  permuted, aggregate)
    return filtered.swapaxes(0, axis)
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
    '''Nearest-neighbor filter helper function.

    Internal to the decompose module: filters `S` observation-wise,
    where the first axis indexes observations.

    Parameters
    ----------
    R_data, R_indices, R_ptr : np.ndarrays
        The `data`, `indices`, and `indptr` of a scipy.sparse matrix

    S : np.ndarray
        The observation data to filter

    aggregate : callable
        The aggregation operator

    Returns
    -------
    S_out : np.ndarray like S
        The filtered data array
    '''
    S_out = np.empty_like(S)

    n_rows = len(R_ptr) - 1
    for row in range(n_rows):
        # CSR row slice: the neighbor indices for this observation
        lo, hi = R_ptr[row], R_ptr[row + 1]
        targets = R_indices[lo:hi]

        if len(targets) == 0:
            # No neighbors: pass the observation through unchanged
            S_out[row] = S[row]
            continue

        neighbors = np.take(S, targets, axis=0)

        if aggregate is np.average:
            # Weighted aggregation uses the recurrence weights
            S_out[row] = aggregate(neighbors, axis=0,
                                   weights=R_data[lo:hi])
        else:
            S_out[row] = aggregate(neighbors, axis=0)

    return S_out
| librosa/decompose.py | 18,417 | Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
By default, this is done with with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 32 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(H,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(P,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
Filtering by nearest-neighbors.
Each data point (e.g, spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default this would use euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
Spectrogram decomposition
=========================
.. autosummary::
:toctree: generated/
decompose
hpss
nn_filter
!/usr/bin/env python -*- coding: utf-8 -*- margin minimum is 1.0 Compute median filters. Pre-allocation here preserves memory layout. Get the non-zeros out of the recurrence matrix | 13,373 | en | 0.527832 |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/SetJointTrajectoryRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import trajectory_msgs.msg
import geometry_msgs.msg
import genpy
import std_msgs.msg
# Autogenerated request message class for the gazebo_msgs/SetJointTrajectory
# service.  Fields mirror the .msg definition embedded in _full_text below.
class SetJointTrajectoryRequest(genpy.Message):
  # MD5 checksum of the message definition, used for wire compatibility checks
  _md5sum = "649dd2eba5ffd358069238825f9f85ab"
  _type = "gazebo_msgs/SetJointTrajectoryRequest"
  _has_header = False #flag to mark the presence of a Header object
  # Full text of the message definition, including all referenced sub-messages
  _full_text = """string model_name
trajectory_msgs/JointTrajectory joint_trajectory
geometry_msgs/Pose model_pose
bool set_model_pose
bool disable_physics_updates
================================================================================
MSG: trajectory_msgs/JointTrajectory
Header header
string[] joint_names
JointTrajectoryPoint[] points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: trajectory_msgs/JointTrajectoryPoint
# Each trajectory point specifies either positions[, velocities[, accelerations]]
# or positions[, effort] for the trajectory to be executed.
# All specified values are in the same order as the joint names in JointTrajectory.msg
float64[] positions
float64[] velocities
float64[] accelerations
float64[] effort
duration time_from_start
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
  # Field names, in serialization order
  __slots__ = ['model_name','joint_trajectory','model_pose','set_model_pose','disable_physics_updates']
  # ROS type string for each field, parallel to __slots__
  _slot_types = ['string','trajectory_msgs/JointTrajectory','geometry_msgs/Pose','bool','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
model_name,joint_trajectory,model_pose,set_model_pose,disable_physics_updates
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetJointTrajectoryRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.model_name is None:
self.model_name = ''
if self.joint_trajectory is None:
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if self.model_pose is None:
self.model_pose = geometry_msgs.msg.Pose()
if self.set_model_pose is None:
self.set_model_pose = False
if self.disable_physics_updates is None:
self.disable_physics_updates = False
else:
self.model_name = ''
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
self.model_pose = geometry_msgs.msg.Pose()
self.set_model_pose = False
self.disable_physics_updates = False
  def _get_types(self):
    """
    internal API method
    """
    # ROS type strings for each slot, parallel to __slots__
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # model_name: uint32 length prefix followed by UTF-8 bytes
      _x = self.model_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # joint_trajectory.header: seq, stamp.secs, stamp.nsecs as three uint32
      _x = self
      buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
      # header.frame_id: length-prefixed UTF-8 string
      _x = self.joint_trajectory.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # joint_names: array length, then each name as a length-prefixed string
      length = len(self.joint_trajectory.joint_names)
      buff.write(_struct_I.pack(length))
      for val1 in self.joint_trajectory.joint_names:
        length = len(val1)
        if python3 or type(val1) == unicode:
          val1 = val1.encode('utf-8')
          length = len(val1)
        buff.write(struct.pack('<I%ss'%length, length, val1))
      # points: array length, then each JointTrajectoryPoint in order
      length = len(self.joint_trajectory.points)
      buff.write(_struct_I.pack(length))
      for val1 in self.joint_trajectory.points:
        # each float64[] field: uint32 length prefix then packed doubles
        length = len(val1.positions)
        buff.write(_struct_I.pack(length))
        pattern = '<%sd'%length
        buff.write(struct.pack(pattern, *val1.positions))
        length = len(val1.velocities)
        buff.write(_struct_I.pack(length))
        pattern = '<%sd'%length
        buff.write(struct.pack(pattern, *val1.velocities))
        length = len(val1.accelerations)
        buff.write(_struct_I.pack(length))
        pattern = '<%sd'%length
        buff.write(struct.pack(pattern, *val1.accelerations))
        length = len(val1.effort)
        buff.write(_struct_I.pack(length))
        pattern = '<%sd'%length
        buff.write(struct.pack(pattern, *val1.effort))
        # time_from_start: secs/nsecs as two int32
        _v1 = val1.time_from_start
        _x = _v1
        buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
      # model_pose (7 float64) plus the two bool flags packed together
      _x = self
      buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # Wire layout (little-endian): model_name (uint32 length + bytes),
    # header (3 uint32), frame_id (uint32 length + bytes), joint_names
    # (uint32 count + strings), points (uint32 count + per-point float64
    # arrays and a 2-int32 duration), then pose (7 float64) + 2 bool bytes.
    try:
        # Make sure nested messages exist before assigning into them.
        if self.joint_trajectory is None:
            self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
        if self.model_pose is None:
            self.model_pose = geometry_msgs.msg.Pose()
        end = 0
        # model_name: uint32 length prefix followed by that many bytes.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.model_name = str[start:end].decode('utf-8')
        else:
            self.model_name = str[start:end]
        # Header: seq, stamp.secs, stamp.nsecs as three uint32 (12 bytes).
        _x = self
        start = end
        end += 12
        (_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        # frame_id: length-prefixed string.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.joint_trajectory.header.frame_id = str[start:end]
        # joint_names: uint32 count, then that many length-prefixed strings.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.joint_trajectory.joint_names = []
        for i in range(0, length):
            # NOTE: 'length' is reused for the element length inside the loop;
            # the loop bound was captured by range() beforehand, so this is safe.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1 = str[start:end].decode('utf-8')
            else:
                val1 = str[start:end]
            self.joint_trajectory.joint_names.append(val1)
        # points: uint32 count, then per-point variable-length float64 arrays.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.joint_trajectory.points = []
        for i in range(0, length):
            val1 = trajectory_msgs.msg.JointTrajectoryPoint()
            # positions: uint32 count + count little-endian float64 values.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.positions = struct.unpack(pattern, str[start:end])
            # velocities: same layout as positions.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.velocities = struct.unpack(pattern, str[start:end])
            # accelerations: same layout.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.accelerations = struct.unpack(pattern, str[start:end])
            # effort: same layout.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.effort = struct.unpack(pattern, str[start:end])
            # time_from_start: duration as two int32 (secs, nsecs).
            _v2 = val1.time_from_start
            _x = _v2
            start = end
            end += 8
            (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
            self.joint_trajectory.points.append(val1)
        # Trailing fixed part: pose (7 float64 = 56 bytes) + 2 flag bytes.
        _x = self
        start = end
        end += 58
        (_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates,) = _get_struct_7d2B().unpack(str[start:end])
        # The two flag bytes arrive as ints; normalize them to bool.
        self.set_model_pose = bool(self.set_model_pose)
        self.disable_physics_updates = bool(self.disable_physics_updates)
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    Identical wire format to serialize(), but the per-point float64 arrays
    are written straight from the numpy buffers instead of struct.pack.
    """
    try:
        # model_name: uint32 length prefix + utf-8 bytes.
        _x = self.model_name
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # Header: seq, stamp.secs, stamp.nsecs as three uint32.
        _x = self
        buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
        # frame_id: length-prefixed string.
        _x = self.joint_trajectory.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # joint_names: uint32 count, then length-prefixed strings.
        length = len(self.joint_trajectory.joint_names)
        buff.write(_struct_I.pack(length))
        for val1 in self.joint_trajectory.joint_names:
            length = len(val1)
            if python3 or type(val1) == unicode:
                val1 = val1.encode('utf-8')
                length = len(val1)
            buff.write(struct.pack('<I%ss'%length, length, val1))
        # points: uint32 count, then per-point arrays + duration.
        length = len(self.joint_trajectory.points)
        buff.write(_struct_I.pack(length))
        for val1 in self.joint_trajectory.points:
            # Each float64 array: uint32 count prefix + raw array bytes.
            # BUGFIX: ndarray.tostring() was deprecated in NumPy 1.19 and
            # removed in NumPy 2.0; tobytes() (available since 1.9) produces
            # the exact same bytes. The unused '<%sd' pattern locals that the
            # generator emitted alongside tostring() were dropped.
            length = len(val1.positions)
            buff.write(_struct_I.pack(length))
            buff.write(val1.positions.tobytes())
            length = len(val1.velocities)
            buff.write(_struct_I.pack(length))
            buff.write(val1.velocities.tobytes())
            length = len(val1.accelerations)
            buff.write(_struct_I.pack(length))
            buff.write(val1.accelerations.tobytes())
            length = len(val1.effort)
            buff.write(_struct_I.pack(length))
            buff.write(val1.effort.tobytes())
            # time_from_start: duration as two int32 (secs, nsecs).
            _v3 = val1.time_from_start
            _x = _v3
            buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
        # Trailing fixed part: pose (7 float64) + 2 flag bytes.
        _x = self
        buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Same wire layout as deserialize(); the float64 arrays are wrapped with
    # numpy.frombuffer instead of struct.unpack.
    # NOTE(review): frombuffer returns arrays that share (read-only) memory
    # with the input buffer -- callers mutating point arrays in place should
    # copy first; confirm against genpy usage.
    try:
        # Make sure nested messages exist before assigning into them.
        if self.joint_trajectory is None:
            self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
        if self.model_pose is None:
            self.model_pose = geometry_msgs.msg.Pose()
        end = 0
        # model_name: uint32 length prefix followed by that many bytes.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.model_name = str[start:end].decode('utf-8')
        else:
            self.model_name = str[start:end]
        # Header: seq, stamp.secs, stamp.nsecs as three uint32 (12 bytes).
        _x = self
        start = end
        end += 12
        (_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        # frame_id: length-prefixed string.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.joint_trajectory.header.frame_id = str[start:end]
        # joint_names: uint32 count, then that many length-prefixed strings.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.joint_trajectory.joint_names = []
        for i in range(0, length):
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1 = str[start:end].decode('utf-8')
            else:
                val1 = str[start:end]
            self.joint_trajectory.joint_names.append(val1)
        # points: uint32 count, then per-point variable-length float64 arrays.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.joint_trajectory.points = []
        for i in range(0, length):
            val1 = trajectory_msgs.msg.JointTrajectoryPoint()
            # positions: uint32 count + count little-endian float64 values.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            # velocities: same layout.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            # accelerations: same layout.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            # effort: same layout.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            end += struct.calcsize(pattern)
            val1.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            # time_from_start: duration as two int32 (secs, nsecs).
            _v4 = val1.time_from_start
            _x = _v4
            start = end
            end += 8
            (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
            self.joint_trajectory.points.append(val1)
        # Trailing fixed part: pose (7 float64 = 56 bytes) + 2 flag bytes.
        _x = self
        start = end
        end += 58
        (_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates,) = _get_struct_7d2B().unpack(str[start:end])
        # The two flag bytes arrive as ints; normalize them to bool.
        self.set_model_pose = bool(self.set_model_pose)
        self.disable_physics_updates = bool(self.disable_physics_updates)
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared genpy Struct for a single little-endian uint32 length prefix.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the shared uint32 length-prefix Struct."""
    return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
_struct_7d2B = None
def _get_struct_7d2B():
global _struct_7d2B
if _struct_7d2B is None:
_struct_7d2B = struct.Struct("<7d2B")
return _struct_7d2B
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/SetJointTrajectoryResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetJointTrajectoryResponse(genpy.Message):
    # Autogenerated genpy message: a bool 'success' flag plus a
    # length-prefixed 'status_message' string.
    _md5sum = "2ec6f3eff0161f4257b808b12bc830c2"
    _type = "gazebo_msgs/SetJointTrajectoryResponse"
    _has_header = False #flag to mark the presence of a Header object
    _full_text = """bool success
string status_message
"""
    __slots__ = ['success','status_message']
    _slot_types = ['bool','string']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommended
        use is keyword arguments as this is more robust to future message
        changes.  You cannot mix in-order arguments and keyword arguments.

        The available fields are:
           success,status_message

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(SetJointTrajectoryResponse, self).__init__(*args, **kwds)
            #message fields cannot be None, assign default values for those that are
            if self.success is None:
                self.success = False
            if self.status_message is None:
                self.status_message = ''
        else:
            # No arguments at all: every field gets its default.
            self.success = False
            self.status_message = ''

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            # success: one byte.
            buff.write(_get_struct_B().pack(self.success))
            # status_message: uint32 length prefix + utf-8 bytes.
            _x = self.status_message
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            # success: one byte, normalized to bool.
            start = end
            end += 1
            (self.success,) = _get_struct_B().unpack(str[start:end])
            self.success = bool(self.success)
            # status_message: uint32 length prefix followed by the bytes.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.status_message = str[start:end].decode('utf-8')
            else:
                self.status_message = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        # No array fields in this message, so this matches serialize().
        try:
            buff.write(_get_struct_B().pack(self.success))
            _x = self.status_message
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        # No array fields in this message, so this matches deserialize().
        try:
            end = 0
            start = end
            end += 1
            (self.success,) = _get_struct_B().unpack(str[start:end])
            self.success = bool(self.success)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.status_message = str[start:end].decode('utf-8')
            else:
                self.status_message = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared genpy Struct for a single little-endian uint32 length prefix.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the shared uint32 length-prefix Struct."""
    return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class SetJointTrajectory(object):
    """Service stub binding the request and response message classes
    for the gazebo_msgs/SetJointTrajectory service."""
    _type          = 'gazebo_msgs/SetJointTrajectory'
    _md5sum = '88f5c10979e3f9649d5ae87a3b12aa65'
    _request_class  = SetJointTrajectoryRequest
    _response_class = SetJointTrajectoryResponse
| files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py | 22,580 | Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
model_name,joint_trajectory,model_pose,set_model_pose,disable_physics_updates
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
internal API method
internal API method
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
serialize message into buffer
:param buff: buffer, ``StringIO``
serialize message into buffer
:param buff: buffer, ``StringIO``
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
autogenerated by genpy from gazebo_msgs/SetJointTrajectoryRequest.msg. Do not edit.
This Python file uses the following encoding: utf-8flag to mark the presence of a Header objectmessage fields cannot be None, assign default values for those that aremost likely buffer underfillmost likely buffer underfill This Python file uses the following encoding: utf-8flag to mark the presence of a Header objectmessage fields cannot be None, assign default values for those that aremost likely buffer underfillmost likely buffer underfill | 2,502 | en | 0.563727 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging

__author__ = 'Tim Schneider <tim.schneider@northbridge-development.de>'
__copyright__ = "Copyright 2015, Northbridge Development Konrad & Schneider GbR"
__credits__ = ["Tim Schneider", ]
__maintainer__ = "Tim Schneider"
__email__ = "mail@northbridge-development.de"
__status__ = "Development"

logger = logging.getLogger(__name__)

import glob
import os
import sys

# Repository root: two directories above this test-runner file.
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
# BUGFIX: 'print BASE_DIR' is Python-2-only syntax and a SyntaxError on
# Python 3; the call form prints identically on both interpreters.
print(BASE_DIR)
# Make the repository root importable for the test run.
sys.path.insert(0, os.path.abspath(BASE_DIR))
# Start collecting coverage for everything under BASE_DIR when the optional
# ``coverage`` package is importable; otherwise fall back to a plain run.
try:
    import coverage
    _coverage_conf = os.path.join(os.path.dirname(__file__), 'coverage.conf')
    cov = coverage.coverage(cover_pylib=False,
                            config_file=_coverage_conf,
                            include='%s/*' % BASE_DIR)
    cov.start()
    sys.stdout.write('Using coverage\n')
except ImportError:
    # cov is checked after the test run; None means "nothing to report".
    cov = None
    sys.stdout.write('Coverage not available. To evaluate the coverage, please install coverage.\n')
import django
from django.conf import settings
from django.core.management import execute_from_command_line

# Unfortunately, apps can not be installed via ``modify_settings``
# decorator, because it would miss the database setup.
INSTALLED_APPS = (
    'django_splitdate',
)

# Minimal stand-alone settings for the test run, collected in one mapping
# and handed to Django in a single configure() call.
_TEST_SETTINGS = dict(
    SECRET_KEY="django_tests_secret_key",
    DEBUG=False,
    TEMPLATE_DEBUG=False,
    ALLOWED_HOSTS=[],
    INSTALLED_APPS=INSTALLED_APPS,
    MIDDLEWARE_CLASSES=[],
    ROOT_URLCONF='tests.urls',
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
        }
    },
    LANGUAGE_CODE='en-us',
    TIME_ZONE='UTC',
    USE_I18N=True,
    USE_L10N=True,
    USE_TZ=True,
    STATIC_URL='/static/',
    # Use a fast hasher to speed up tests.
    PASSWORD_HASHERS=(
        'django.contrib.auth.hashers.MD5PasswordHasher',
    ),
    FIXTURE_DIRS=glob.glob(BASE_DIR + '/' + '*/fixtures/'),
)

settings.configure(**_TEST_SETTINGS)
django.setup()
args = [sys.argv[0], 'test']
# Current module (``tests``) and its submodules.
test_cases = '.'

# Allow accessing test options from the command line: a first argument that
# is not an option (no leading '-') selects the test cases to run, and the
# remaining arguments start one position later.
offset = 1
if len(sys.argv) > 1 and not sys.argv[1].startswith('-'):
    test_cases = sys.argv[1]
    offset = 2
args.append(test_cases)
# ``verbosity`` can be overwritten from command line.
#args.append('--verbosity=2')
args.extend(sys.argv[offset:])

execute_from_command_line(args)

# When coverage was enabled above, finish the session and emit the report.
if cov is not None:
    sys.stdout.write('Evaluating Coverage\n')
    cov.stop()
    cov.save()
    sys.stdout.write('Generating HTML Report\n')
    cov.html_report()
#-------------------------------------------------------------------------------
#
# Base class for all door sensors
#
import iofun
import message
from device import Device
from querier import Querier
from querier import MsgHandler
from dbbuilder import GenericDBBuilder
from linkdb import LightDBRecordFormatter
from us.pfrommer.insteon.msg import InsteonAddress
class DefaultMsgHandler(MsgHandler):
label = None
def __init__(self, l):
self.label = l
def processMsg(self, msg):
iofun.out(self.label + " got msg: " + msg.toString())
return 1
# class StatusMsgHandler(MsgHandler):
# label = None
# def __init__(self, l):
# self.label = l
# def processMsg(self, msg):
# if (msg.getByte("command2") == 0xFF):
# iofun.out(" Status: Open")
# elif (msg.getByte("command2") == 0x00):
# iofun.out(" Status: Closed")
# return 1
class StatusMsgHandler(MsgHandler):
    """Decodes the extended status/settings reply of the Hidden Door Sensor:
    battery level, open/closed state, heartbeat interval, low-battery
    threshold and the configuration flag byte (printed bit by bit)."""
    label = None

    def __init__(self, l):
        self.label = l

    def processMsg(self, msg):
        """Pretty-print an extended status reply.

        Returns 1 when the message was an extended reply and was handled,
        0 for any other (unexpected) direct message.
        """
        # Guard clause: only extended messages carry the settings payload.
        if not msg.isExtended():
            iofun.out(self.label + " unexpected direct message: " + msg.toString())
            return 0
        rawflags = msg.getByte("userData3") & 0xFF
        # Binary rendering of the config byte, zero-padded to 8 bits.
        flags = bin(msg.getByte("userData3") & 0xFF)[2:].zfill(8)
        batterylevel = msg.getByte("userData4") & 0xFF
        rawopenclosed = msg.getByte("userData5") & 0xFF
        if rawopenclosed == 0:
            openclosed = "Open"
        elif rawopenclosed == 255:
            openclosed = "Closed"
        else:
            openclosed = "Error reading status"
        # heartbeat interval = this value * 5 minutes. 0x00 = 24 hours (default)
        rawheartbeatint = msg.getByte("userData6") & 0xFF
        if rawheartbeatint == 0:
            heartbeatint = 24 * 60
        else:
            heartbeatint = rawheartbeatint * 5
        lowbatterythreshold = msg.getByte("userData7") & 0xFF
        # Bit 0
        if rawflags & 0b00000001 == 1:
            cleanupreport = "Send Cleanup Report"
        else:
            cleanupreport = "Don't Send Cleanup Report"
        # Bit 1
        if rawflags & 0b00000010 == 2:
            twogroups = "Send Open on Group 1 ON and Closed on Group 2 ON"
        else:
            twogroups = "Send both Open and Closed on Group 1 (On=Open and Off=Closed)"
        # Bit 2
        if rawflags & 0b00000100 == 4:
            openrepeat = "Send Repeated Open Commands (Every 5 mins for 50 mins)"
        else:
            openrepeat = "Don't Send Repeated Open Commands"
        # Bit 3
        if rawflags & 0b00001000 == 8:
            closedrepeat = "Send Repeated Closed Commands (Every 5 mins for 50 mins)"
        else:
            closedrepeat = "Don't Send Repeated Closed Commands"
        # Bit 4
        if rawflags & 0b00010000 == 16:
            ffgrp = "Link to FF Group"
        else:
            ffgrp = "Don't link to FF Group"
        # Bit 5
        if rawflags & 0b00100000 == 32:
            ledonoff = "LED does not blink on transmission"
        else:
            ledonoff = "LED blinks on transmission"
        # Bit 6 (documented as having no effect either way)
        if rawflags & 0b01000000 == 64:
            noeffect = "No Effect"
        else:
            noeffect = "No Effect"
        # Bit 7
        if rawflags & 0b10000000 == 128:
            plock = "Programming lock on"
        else:
            plock = "Programming lock off"
        iofun.out(self.label + " Battery level: " + format(batterylevel, 'd') + " Low Battery threshold: " + format(lowbatterythreshold, 'd'))
        # BUGFIX: openclosed and flags are strings; the old
        # format(openclosed, 'd') / format(flags, 'd') raised ValueError
        # ("Unknown format code 'd'"), so print them directly.
        iofun.out(" Sensor Status: " + openclosed)
        iofun.out(" Heartbeat Set Value: " + format(rawheartbeatint, 'd'))
        iofun.out(" Heartbeat Time: " + format(heartbeatint, 'd') + " minutes")
        iofun.out(" Configuration Byte (hex): " + format(rawflags, 'X'))
        iofun.out(" Configuration Byte (binary): " + flags)
        iofun.out(" Bit 0: 1 = Send Cleanup Report, 0 = Don't Send Cleanup Report")
        iofun.out(" Bit 1: 1 = Send Open on Group 1 ON / Closed on Group 2 ON, 0 = Send both Open and Closed on Group 1 (On=Open and Off=Closed)")
        iofun.out(" Bit 2: 1 = Send Repeated Open Commands, 0 = Don't Send Repeated Open Commands")
        iofun.out(" Bit 3: 1 = Send Repeated Closed Commands, 0 = Don't Send Repeated Closed Commands")
        iofun.out(" Bit 4: 1 = Link to FF Group, 0 = Don't link to FF Group")
        iofun.out(" Bit 5: 1 = LED does not blink on transmission, 0 = LED blinks on transmission")
        iofun.out(" Bit 6: No Effect")
        iofun.out(" Bit 7: 1 = Programming lock on, 0 = Programming Lock off")
        iofun.out("\nCurrent Config Byte Setting:")
        iofun.out("\n\t" + cleanupreport + "\n\t" + twogroups + "\n\t" + openrepeat + "\n\t" + closedrepeat + "\n\t" + ffgrp + "\n\t" + ledonoff + "\n\t" + noeffect + "\n\t" + plock)
        return 1
        # NOTE: two unreachable trailing lines that referenced an undefined
        # 'tmp' variable were removed (both branches above already return).
class BatMsgHandler(MsgHandler):
    """Handles the battery-level reply and prints the raw reading."""
    label = None

    def __init__(self, l):
        self.label = l

    def processMsg(self, msg):
        """Extract command2 as the battery byte, print it, report handled."""
        level = msg.getByte("command2") & 0xFF
        iofun.out(" battery level: " + format(level, 'd'))
        return 1
class HiddenDoorSensor(Device):
    """============== Insteon Hidden Door Sensor ===============
    NOTE: 1) The sensor must be awake in order for you to read/write data from/to it
    2) Press and hold the link button to put it into Link mode. This is the best way to ensure it is awake
    3) Use modem.startWatch() / modem.stopWatch() to see incoming messages
    """
    def __init__(self, name, addr):
        Device.__init__(self, name, addr)
        self.dbbuilder = GenericDBBuilder(addr, self.db)
        self.db.setRecordFormatter(LightDBRecordFormatter())

    def getStatus(self):
        """getStatus()
        Reads and displays all of the device settings as well as current open/closed position"""
        self.querier.setMsgHandler(StatusMsgHandler("\nHidden Door Sensor Status and Settings\n"))
        return self.querier.queryext(0x2E, 0x00, [0x01, 0x00, 0x00])

    def getBatLevel(self):
        """getBatLevel()
        Reports battery level as a decimal number [61=~1.75v 54=~1.6 51=~1.5 40=~1.25]"""
        self.querier.setMsgHandler(BatMsgHandler("Get Bat Level"))
        return self.querier.queryext(0x19, 0x01, [0, 0, 0])

    def getFlags(self):
        """getFlags()
        Reads and displays operating flags"""
        iofun.writeMsg(message.createStdMsg(InsteonAddress(self.getAddress()), 0x0F, 0x1F, 0x00, -1))

    def getDDBCount(self):
        """getDDBCount()
        Data Base Delta flag gets incremented with any change in the Database """
        iofun.writeMsg(message.createStdMsg(InsteonAddress(self.getAddress()), 0x0F, 0x1F, 0x01, -1))

    def setPLOn(self):
        """setPLOn()
        This enables the Local Programming Lock - No Press and Hold Linking"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Programming Lock ON"))
        return self.querier.queryext(0x20, 0x00, [0x00, 0x00, 0x00])

    def setPLOff(self):
        """setPLOff()
        This disables the Local Programming Lock - Allows Press and Hold Linking"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Programming Lock OFF"))
        return self.querier.queryext(0x20, 0x01, [0x00, 0x00, 0x00])

    def setLEDOff(self):
        """setLEDOff()
        This disables the LED blink during transmission"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set LED OFF"))
        return self.querier.queryext(0x20, 0x02, [0x00, 0x00, 0x00])

    def setLEDOn(self):
        """setLEDOn()
        This enables the LED blink during transmission"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set LED ON"))
        return self.querier.queryext(0x20, 0x03, [0x00, 0x00, 0x00])

    def setTwoGroupsOn(self):
        """setTwoGroupsOn()
        This makes the HDS send an ON to group 1 for Open and an ON to group 2 for closed."""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Two Groups ON"))
        return self.querier.queryext(0x20, 0x04, [0x00, 0x00, 0x00])

    def setTwoGroupsOff(self):
        """setTwoGroupsOff()
        This makes the HDS send an ON to group 1 for open and an OFF to group 1 for closed"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Two Groups Off"))
        return self.querier.queryext(0x20, 0x05, [0x00, 0x00, 0x00])

    def setLinkToAllGrpsOn(self):
        """setLinkToAllGrpsOn()
        This links the HDS to all groups (Group 0xFF)"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Link to FF"))
        return self.querier.queryext(0x20, 0x06, [0x00, 0x00, 0x00])

    def setLinkToAllGrpsOff(self):
        """setLinkToAllGrpsOff()
        This removes the link to all groups (0xFF)"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Link to FF off"))
        return self.querier.queryext(0x20, 0x07, [0x00, 0x00, 0x00])

    def setCloseRepeatOn(self):
        """setCloseRepeatOn()
        This sets the HDS to send repeat closed commands every 5 mins for 50 mins"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Close Repeat ON"))
        return self.querier.queryext(0x20, 0x08, [0x00, 0x00, 0x00])

    def setCloseRepeatOff(self):
        """setCloseRepeatOff()
        This stops the HDS from sending repeat closed commands every 5 mins for 50 mins"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Close Repeat OFF"))
        return self.querier.queryext(0x20, 0x09, [0x00, 0x00, 0x00])

    def setOpenRepeatOn(self):
        """setOpenRepeatOn()
        This sets the HDS to send repeat open commands every 5 mins for 50 mins"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Open Repeat ON"))
        return self.querier.queryext(0x20, 0x0A, [0x00, 0x00, 0x00])

    def setOpenRepeatOff(self):
        """setOpenRepeatOff()
        This stops the HDS from sending repeat open commands every 5 mins for 50 mins"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Open Repeat OFF"))
        return self.querier.queryext(0x20, 0x0B, [0x00, 0x00, 0x00])

    def setCleanupReportOff(self):
        """setCleanupReportOff()
        This prevents the HDS from sending a cleanup report after changes in status"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Cleanup Report Off\n"))
        return self.querier.queryext(0x20, 0x16, [0x00, 0x00, 0x00])

    def setCleanupReportOn(self):
        """setCleanupReportOn()
        This allows the HDS to send a cleanup report after changes in status"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Cleanup Report On\n"))
        return self.querier.queryext(0x20, 0x17, [0x00, 0x00, 0x00])

    def setHBInterval(self, level):
        """setHBInterval(level)
        This sets the heartbeat interval in 5 minute increments. Value (0-255) x 5mins (0 = 24 hours)"""
        self.querier.setMsgHandler(DefaultMsgHandler("Set Heartbeat Interval"))
        return self.querier.queryext(0x2E, 0x00, [0x01, 0x02, level])

    def setLowBatLevel(self, level):
        """setLowBatLevel(level)
        This sets the point where the HDS sends an ON command to Group 3 to indicate low battery. Value (0-255)"""
        # BUGFIX: handler label previously said "Set Heartbeat Interval"
        # (copy/paste from setHBInterval).
        self.querier.setMsgHandler(DefaultMsgHandler("Set Low Battery Level"))
        return self.querier.queryext(0x2E, 0x00, [0x01, 0x03, level])
| python/hiddendoorsensor.py | 10,625 | ------------------------------------------------------------------------------- Base class for all door sensors class StatusMsgHandler(MsgHandler): label = None def __init__(self, l): self.label = l def processMsg(self, msg): if (msg.getByte("command2") == 0xFF): iofun.out(" Status: Open") elif (msg.getByte("command2") == 0x00): iofun.out(" Status: Closed") return 1 heartbeat interval = this value * 5minutes. 0x00 = 24 hours (default) Bit 0 Bit 1 Bit 2 Bit 3 Bit 4 Bit 5 Bit 6 Bit 7 def getStatus(self): """getStatus()""" self.querier.setMsgHandler(DefaultMsgHandler("status")) return self.querier.queryext(0x19, 0x00, [0,0,0]) | 701 | en | 0.294696 |
# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.
import torch
import matplotlib.pyplot as plt
from lib.pylabyk import numpytorch as npt
from lib.pylabyk.numpytorch import npy, npys
def print_demo(p, fun):
    """Apply *fun* to the probability table *p* and print the inputs,
    the two returned distributions, and their sums."""
    out = fun(p)
    print('-----')
    print('fun: {}'.format(fun.__name__))
    labeled = (('p:', p), ('out[0]:', out[0]), ('out[1]:', out[1]))
    for caption, value in labeled:
        print(caption)
        print(value)
    print('out[0].sum(), out[1].sum()')
    print(out[0].sum(), out[1].sum())
if __name__ == '__main__':
    # Demo: run print_demo over three 2x2 probability tables (degenerate
    # [0,1], degenerate [1,0], and a scaled-down uniform table), first with
    # npt.min_distrib and then with npt.max_distrib.
    for p, fun in [
        (torch.tensor([
            [0., 1.],
            [0.5, 0.5]
        ]) * 1., npt.min_distrib),
        (torch.tensor([
            [1., 0.],
            [0.5, 0.5]
        ]) * 1., npt.min_distrib),
        (torch.tensor([
            [0.5, 0.5],
            [0.5, 0.5]
        ]) * 0.1, npt.min_distrib),
        (torch.tensor([
            [0., 1.],
            [0.5, 0.5]
        ]) * 1., npt.max_distrib),
        (torch.tensor([
            [1., 0.],
            [0.5, 0.5]
        ]) * 1., npt.max_distrib),
        (torch.tensor([
            [0.5, 0.5],
            [0.5, 0.5]
        ]) * 0.1, npt.max_distrib),
    ]:
        print_demo(p, fun)
| demo/demo_min_max_distrib.py | 1,205 | Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu. | 68 | en | 0.325436 |
def onehot_encode_seq(sequence, m=0, padding=False):
    """Converts a given IUPAC DNA sequence to a one-hot
    encoded DNA sequence.

    :param sequence: IUPAC DNA string; accepts acgtu plus the ambiguity
        codes n, r, y, s, w, k, m (case-insensitive).
    :param m: motif/filter width; with padding, (m - 1) uniform rows are
        added on each side.
    :param padding: if True, pad both ends with uniform (0.25) rows.
    :return: numpy array of shape (len(sequence) [+ 2*(m-1)], 4), columns
        ordered A, C, G, T.
    """
    # BUGFIX: sys was used below (sys.exit) but never imported, which made
    # the invalid-character branch raise NameError instead of exiting.
    # The unused 'import torch' was dropped.
    import sys
    import numpy as np

    valid_keys = ['a', 'c', 'g', 't', 'u', 'n', 'r', 'y', 's', 'w', 'k', 'm']
    nucs = {'a': 0, 'c': 1, 'g': 2, 't': 3, 'u': 3}
    # Ambiguity codes split their probability mass over two columns.
    ambig = {'r': (0, 2), 'y': (1, 3), 's': (1, 2),
             'w': (0, 3), 'k': (2, 3), 'm': (0, 1)}
    if padding:
        assert m != 0, "If using padding, m should be bigger than 0"
        padding_mat = np.tile(0.25, (m - 1, 4))
    onehot = np.tile(.0, (len(sequence), 4))
    for i, char in enumerate(sequence.lower()):
        if char not in valid_keys:
            sys.exit("invalid char in sequence (choose from acgt and nryswkm)")
        elif char == 'n':
            onehot[i, :] = 0.25        # fully ambiguous: uniform row
        elif char in ambig:
            onehot[i, ambig[char]] = 0.5
        else:
            onehot[i, nucs[char]] = 1
    if padding:
        onehot = np.concatenate((padding_mat, onehot, padding_mat))
    return onehot
def save_meme(motifs_ppm_dict, output_file="found_motifs.meme"):
    """Saves the found PPMs (given as dictionary) to a file that's
    compatible with MEME suite applications.

    :param motifs_ppm_dict: mapping of motif name -> position probability
        matrix (rows = positions, columns = A/C/G/T).
    :param output_file: path of the MEME-format file to write.
    """
    import pandas as pd
    # Minimal MEME header: version, alphabet and strand information.
    meme_string = ["MEME version 4", "", "ALPHABET= ACGT", "", "strands: + -", ""]
    for key in motifs_ppm_dict:  # the former enumerate() index was unused
        curr_motif = pd.DataFrame(motifs_ppm_dict[key])
        s1 = "MOTIF " + str(key)
        # alength = alphabet size (columns), w = motif width (rows).
        s2 = "letter-probability matrix: alength= " + str(curr_motif.shape[1]) + " w= " + str(curr_motif.shape[0])
        s3 = curr_motif.to_csv(sep="\t", index=False, header=False)
        meme_string = meme_string + [s1, s2, s3]
    meme_string = "\n".join(meme_string)
    with open(output_file, 'w') as the_file:
        the_file.write(meme_string)
    print("wrote meme list")
def align_conv_filters(model, input_seqs, m, train_ind):
    """Aligns the convolutional filters of a given scover model back
    to the given input sequences at the given indices.

    Parameters:
        model: trained model exposing a `conv_1` convolution layer.
        input_seqs: one-hot encoded sequences as a torch tensor
            (assumed indexable by train_ind and squeezable to
            (n_seq, seq_len, 4) -- TODO confirm with caller).
        m: filter/motif width used to slice the activating subsequence.
        train_ind: indices of the sequences to align against.

    Returns:
        (motifs_pfm_dict, motifs_ppm_dict): dicts keyed by filter index
        (as str) holding position frequency matrices and position
        probability matrices. Filters activated by 10 or fewer
        sequences are omitted from both dicts.
    """
    # Motif analysis
    import numpy as np
    import torch
    from tqdm import trange
    activation_seqs = input_seqs[train_ind]
    # Forward pass through the first conv layer only; no gradients needed.
    with torch.no_grad():
        model.eval()
        activations = model.conv_1(activation_seqs).cpu().detach().numpy().squeeze()
    n_seq = activation_seqs.shape[0]  # NOTE(review): computed but unused below
    activation_seqs = activation_seqs.squeeze()
    seq_len = activation_seqs.shape[1]  # NOTE(review): computed but unused below
    d = activations.shape[1]  # number of convolutional filters
    motifs_pfm_dict = dict() # store pfms in this dict
    motifs_ppm_dict = dict() # store ppms in this dict
    # cycle through convolutional filters
    for filter_num in trange(d):
        # select activations for filter. new array = nseq x length seq
        curr_activation = activations[:,filter_num,:]
        # get those sequences that have positive values
        seq_has_pos_vals = np.argwhere(np.amax(curr_activation, axis=1) > 0)[:,0]
        # only keep filters activated by more than 10 sequences
        if seq_has_pos_vals.shape[0] > 10:
            # per sequence, get position of maximum activation
            per_seq_where_max_pos = np.argmax(curr_activation[seq_has_pos_vals], axis=1)
            curr_activation_seqs = activation_seqs[seq_has_pos_vals]
            curr_str_list = []
            # go through sequences and save to curr_str_list
            for i in range(seq_has_pos_vals.shape[0]):
                # position of the maximum activation for this sequence
                curr_max = per_seq_where_max_pos[i]
                # get subsequence that activated filter (max 1 per seq)
                curr_str_list.append(curr_activation_seqs[i][curr_max:(curr_max+m)])
            # put them together in a numpy array
            sequence_array = np.stack(curr_str_list)
            # get sum per position
            sequence_array_summed = np.sum(sequence_array,axis=0)
            # save pfm
            motifs_pfm_dict[str(filter_num)] = sequence_array_summed
            # get counts per row
            row_sums = np.sum(sequence_array_summed, axis=1)
            # convert pfm to ppm (nan_to_num guards rows whose sum is zero)
            sequence_array_summed = np.nan_to_num(sequence_array_summed / row_sums[:, np.newaxis])
            motifs_ppm_dict[str(filter_num)] = sequence_array_summed
    return motifs_pfm_dict, motifs_ppm_dict
def randomize_sequences(sequences):
    """Return a list of shuffled copies of the given DNA sequences.

    Each output string is a random permutation of the characters of the
    corresponding input string; the order of sequences is preserved.
    """
    import random
    return [''.join(random.sample(s, len(s))) for s in sequences]
| bin/scover_utils.py | 4,864 | Aligns the convolutional filters of a given scover model back
to the given input sequences at the given indices.
Converts a given IUPAC DNA sequence to a one-hot
encoded DNA sequence.
Randomly permutes a set of DNA sequences.
Saves the found PPMs (given as dictionary) to a file that's
compatible with MEME suite applications.
Motif analysis store pfms in this dict store pwms in this dict cycle through convolutional filters select activations for filter. new array = nseq x length seq get those sequences that have positive values in the case that there is a minimum of 10 sequences that activate the filter per sequence, get position of maximum activation go through sequences and save to curr_str_list maximum activation get subsequence that activated filter (max 1 per seq) put them together in a numpy array get sum per position save pfm get counts per row convert pfm to ppm | 890 | en | 0.864923 |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.utils
@author Piotr Wójcik
@date 24.03.2011
"""
import logging
import os
from time import time
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from common.utils import ServerProxy
from wi.utils.exceptions import RestErrorException
from wi.utils.messages_ajax import error, success
from wi.utils.messages_codes import get_error, auth_error_text
# Query-string field carrying the post-login redirect target.
REDIRECT_FIELD_NAME = 'next'
# Proxy to the Cloud Manager service at the address from Django settings
# (presumably an RPC-style client; see common.utils.ServerProxy).
CLM = ServerProxy(settings.CLOUD_MANAGER_ADDRESS)
def check_response_errors(response, session):
    """
    Checks status of the response and throws appropriate error.

    Returns @prm{response} unchanged when its status is 'ok'; otherwise
    raises RestErrorException with the message mapped from the error code.

    @prm{session} is accepted for interface compatibility but unused here.
    """
    if response['status'] != 'ok':
        # Removed the dead `from wi.utils.auth import logout` import:
        # logout was never called in this function.
        error_code = response['status']
        error_msg = get_error(error_code)
        raise RestErrorException(error_msg)
    return response
def get_dict_from_list(list_of_dicts, key_value, key='id'):
    """
    Returns dictionary with key: @prm{key} equal to @prm{key_value} from a
    list of dictionaries: @prm{list_of_dicts}, or None when no match exists.

    Raises Exception when any inspected dictionary lacks @prm{key}
    (or maps it to None).
    """
    for dictionary in list_of_dicts:
        # `is None` instead of `== None` (PEP 8); as before this treats a
        # missing key and an explicit None value the same way.
        if dictionary.get(key) is None:
            raise Exception("No key: " + key + " in dictionary.")
        if dictionary.get(key) == key_value:
            return dictionary
    return None
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
    """
    Returns list of dictionaries with keys: @prm{key} equal to one from list
    @prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.

    Raises Exception when any inspected dictionary lacks @prm{key}
    (or maps it to None). Input order is preserved.
    """
    ret = []
    for dictionary in list_of_dicts:
        # `is None` instead of `== None` (PEP 8); the check still runs for
        # every dictionary, matching or not, as in the original.
        if dictionary.get(key) is None:
            raise Exception("No key: " + key + " in dictionary.")
        if dictionary.get(key) in list_of_key_values:
            ret.append(dictionary)
    return ret
| src/wi/utils/__init__.py | 2,483 | Checks status of the response and throws appropriate error.
Returns dictionary with key: @prm{key} equal to @prm{key_value} from a
list of dictionaries: @prm{list_of_dicts}.
Returns list of dictionaries with keys: @prm{key} equal to one from list
@prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.
@package src.wi.utils
@author Piotr Wójcik
@date 24.03.2011
-*- coding: utf-8 -*- @COPYRIGHT_begin Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @COPYRIGHT_end | 1,050 | en | 0.731386 |
# -*- coding: utf-8 -*-
# Copyright 2010-2020, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Change the reference to frameworks.
Typical usage:
% change_reference_mac.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/MacOS/target
"""
__author__ = "horo"
import optparse
import os
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
  """Parse command line options (--qtdir and --target)."""
  option_parser = optparse.OptionParser()
  for flag in ('--qtdir', '--target'):
    option_parser.add_option(flag, dest=flag.lstrip('-'))
  parsed_options, _ = option_parser.parse_args()
  return parsed_options
def GetFrameworkPath(name, version):
  """Return the path to the framework binary inside its bundle."""
  return '{0}.framework/Versions/{1}/{0}'.format(name, version)
def GetReferenceTo(framework):
  """Return the install-name reference into ConfigDialog.app's Frameworks."""
  prefix = '@executable_path/../../../ConfigDialog.app/Contents/Frameworks/'
  return prefix + framework
def InstallNameTool(target, reference_from, reference_to):
  """Rewrite one install-name reference in the target binary.

  Runs `install_name_tool -change` and dies on failure (via RunOrDie).
  """
  RunOrDie(
      ['install_name_tool', '-change', reference_from, reference_to, target])
def main():
  """Fix up framework references in the target binary.

  Rewrites the @rpath references to the Qt frameworks and the
  @executable_path reference to GuiTool_lib so that they all point into
  ConfigDialog.app's Frameworks directory.
  """
  opt = ParseOption()

  if not opt.qtdir:
    PrintErrorAndExit('--qtdir option is mandatory.')

  if not opt.target:
    PrintErrorAndExit('--target option is mandatory.')

  unused_qtdir = os.path.abspath(opt.qtdir)  # TODO(komatsu): remove this.
  target = os.path.abspath(opt.target)

  # Changes the references to the Qt frameworks from the target application.
  # From: @rpath/QtXxx.framework/Versions/5/QtXxx
  # To:   @executable_path/../../../ConfigDialog.app/Contents/Frameworks/...
  # (The three identical per-framework stanzas are folded into one loop.)
  for qt_name in ('QtCore', 'QtGui', 'QtWidgets'):
    qt_framework = GetFrameworkPath(qt_name, '5')
    InstallNameTool(target,
                    '@rpath/%s' % qt_framework,
                    GetReferenceTo(qt_framework))

  # Change the reference to $(branding)Tool_lib from the target application
  # From: @executable_path/../Frameworks/MozcTool_lib.framework/...
  # To:   @executable_path/../../../ConfigDialog.app/Contents/Frameworks/...
  toollib_framework = GetFrameworkPath('GuiTool_lib', 'A')
  InstallNameTool(target,
                  '@executable_path/../Frameworks/%s' % toollib_framework,
                  GetReferenceTo(toollib_framework))
# Script entry point.
if __name__ == '__main__':
  main()
| src/build_tools/change_reference_mac.py | 4,145 | Parse command line options.
Change the reference to frameworks.
Typical usage:
% change_reference_mac.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/MacOS/target
-*- coding: utf-8 -*- Copyright 2010-2020, Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. TODO(komatsu): remove this. Changes the reference to QtCore framework from the target application From: @rpath/QtCore.framework/Versions/5/QtCore To: @executable_path/../../../MozcTool.app/Contents/Frameworks/... Changes the reference to QtGui framework from the target application Changes the reference to QtWidgets framework from the target application Change the reference to $(branding)Tool_lib from the target application From: @executable_path/../Frameworks/MozcTool_lib.framework/... 
To: @executable_path/../../../ConfigDialog.app/Contents/Frameworks/... | 2,258 | en | 0.829457 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: users.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import include.common_pb2 as common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users.proto',
package='tinkoff.public.invest.api.contract.v1',
syntax='proto3',
serialized_options=b'\n\034ru.tinkoff.piapi.contract.v1P\001Z\021Tinkoff/investAPI\242\002\005TIAPI\252\002\024Tinkoff.InvestAPI.V1\312\002\021Tinkoff\\Invest\\V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0busers.proto\x12%tinkoff.public.invest.api.contract.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0c\x63ommon.proto\"\x14\n\x12GetAccountsRequest\"W\n\x13GetAccountsResponse\x12@\n\x08\x61\x63\x63ounts\x18\x01 \x03(\x0b\x32..tinkoff.public.invest.api.contract.v1.Account\"\x8d\x02\n\x07\x41\x63\x63ount\x12\n\n\x02id\x18\x01 \x01(\t\x12@\n\x04type\x18\x02 \x01(\x0e\x32\x32.tinkoff.public.invest.api.contract.v1.AccountType\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x44\n\x06status\x18\x04 \x01(\x0e\x32\x34.tinkoff.public.invest.api.contract.v1.AccountStatus\x12/\n\x0bopened_date\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63losed_date\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"0\n\x1aGetMarginAttributesRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xa8\x03\n\x1bGetMarginAttributesResponse\x12K\n\x10liquid_portfolio\x18\x01 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12J\n\x0fstarting_margin\x18\x02 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12I\n\x0eminimal_margin\x18\x03 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12Q\n\x17\x66unds_sufficiency_level\x18\x04 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12R\n\x17\x61mount_of_missing_funds\x18\x05 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\"\x16\n\x14GetUserTariffRequest\"\xab\x01\n\x15GetUserTariffResponse\x12G\n\x0cunary_limits\x18\x01 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.UnaryLimit\x12I\n\rstream_limits\x18\x02 \x03(\x0b\x32\x32.tinkoff.public.invest.api.contract.v1.StreamLimit\"7\n\nUnaryLimit\x12\x18\n\x10limit_per_minute\x18\x01 \x01(\x05\x12\x0f\n\x07methods\x18\x02 \x03(\t\"-\n\x0bStreamLimit\x12\r\n\x05limit\x18\x01 \x01(\x05\x12\x0f\n\x07streams\x18\x02 \x03(\t\"\x10\n\x0eGetInfoRequest\"\\\n\x0fGetInfoResponse\x12\x13\n\x0bprem_status\x18\x01 \x01(\x08\x12\x13\n\x0bqual_status\x18\x02 
\x01(\x08\x12\x1f\n\x17qualified_for_work_with\x18\x03 \x03(\t*\x80\x01\n\x0b\x41\x63\x63ountType\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_UNSPECIFIED\x10\x00\x12\x18\n\x14\x41\x43\x43OUNT_TYPE_TINKOFF\x10\x01\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_TINKOFF_IIS\x10\x02\x12\x1b\n\x17\x41\x43\x43OUNT_TYPE_INVEST_BOX\x10\x03*{\n\rAccountStatus\x12\x1e\n\x1a\x41\x43\x43OUNT_STATUS_UNSPECIFIED\x10\x00\x12\x16\n\x12\x41\x43\x43OUNT_STATUS_NEW\x10\x01\x12\x17\n\x13\x41\x43\x43OUNT_STATUS_OPEN\x10\x02\x12\x19\n\x15\x41\x43\x43OUNT_STATUS_CLOSED\x10\x03\x32\xbb\x04\n\x0cUsersService\x12\x84\x01\n\x0bGetAccounts\x12\x39.tinkoff.public.invest.api.contract.v1.GetAccountsRequest\x1a:.tinkoff.public.invest.api.contract.v1.GetAccountsResponse\x12\x9c\x01\n\x13GetMarginAttributes\x12\x41.tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest\x1a\x42.tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse\x12\x8a\x01\n\rGetUserTariff\x12;.tinkoff.public.invest.api.contract.v1.GetUserTariffRequest\x1a<.tinkoff.public.invest.api.contract.v1.GetUserTariffResponse\x12x\n\x07GetInfo\x12\x35.tinkoff.public.invest.api.contract.v1.GetInfoRequest\x1a\x36.tinkoff.public.invest.api.contract.v1.GetInfoResponseBf\n\x1cru.tinkoff.piapi.contract.v1P\x01Z\x11Tinkoff/investAPI\xa2\x02\x05TIAPI\xaa\x02\x14Tinkoff.InvestAPI.V1\xca\x02\x11Tinkoff\\Invest\\V1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,common__pb2.DESCRIPTOR,])
_ACCOUNTTYPE = _descriptor.EnumDescriptor(
name='AccountType',
full_name='tinkoff.public.invest.api.contract.v1.AccountType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_TINKOFF', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_TINKOFF_IIS', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_INVEST_BOX', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1376,
serialized_end=1504,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTTYPE)
AccountType = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTTYPE)
_ACCOUNTSTATUS = _descriptor.EnumDescriptor(
name='AccountStatus',
full_name='tinkoff.public.invest.api.contract.v1.AccountStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_NEW', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_OPEN', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_CLOSED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1506,
serialized_end=1629,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTSTATUS)
AccountStatus = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTSTATUS)
ACCOUNT_TYPE_UNSPECIFIED = 0
ACCOUNT_TYPE_TINKOFF = 1
ACCOUNT_TYPE_TINKOFF_IIS = 2
ACCOUNT_TYPE_INVEST_BOX = 3
ACCOUNT_STATUS_UNSPECIFIED = 0
ACCOUNT_STATUS_NEW = 1
ACCOUNT_STATUS_OPEN = 2
ACCOUNT_STATUS_CLOSED = 3
_GETACCOUNTSREQUEST = _descriptor.Descriptor(
name='GetAccountsRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=121,
)
_GETACCOUNTSRESPONSE = _descriptor.Descriptor(
name='GetAccountsResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='accounts', full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse.accounts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=210,
)
_ACCOUNT = _descriptor.Descriptor(
name='Account',
full_name='tinkoff.public.invest.api.contract.v1.Account',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tinkoff.public.invest.api.contract.v1.Account.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='tinkoff.public.invest.api.contract.v1.Account.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='tinkoff.public.invest.api.contract.v1.Account.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='tinkoff.public.invest.api.contract.v1.Account.status', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='opened_date', full_name='tinkoff.public.invest.api.contract.v1.Account.opened_date', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='closed_date', full_name='tinkoff.public.invest.api.contract.v1.Account.closed_date', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=482,
)
_GETMARGINATTRIBUTESREQUEST = _descriptor.Descriptor(
name='GetMarginAttributesRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account_id', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest.account_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=484,
serialized_end=532,
)
_GETMARGINATTRIBUTESRESPONSE = _descriptor.Descriptor(
name='GetMarginAttributesResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='liquid_portfolio', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.liquid_portfolio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='starting_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.starting_margin', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='minimal_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.minimal_margin', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='funds_sufficiency_level', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.funds_sufficiency_level', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount_of_missing_funds', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.amount_of_missing_funds', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=959,
)
_GETUSERTARIFFREQUEST = _descriptor.Descriptor(
name='GetUserTariffRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=961,
serialized_end=983,
)
_GETUSERTARIFFRESPONSE = _descriptor.Descriptor(
name='GetUserTariffResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='unary_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.unary_limits', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stream_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.stream_limits', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=986,
serialized_end=1157,
)
_UNARYLIMIT = _descriptor.Descriptor(
name='UnaryLimit',
full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit_per_minute', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.limit_per_minute', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='methods', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.methods', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1159,
serialized_end=1214,
)
# --- protoc-generated message Descriptors (users.proto) — do not edit by hand ---
# Descriptor for StreamLimit: limit of simultaneous streams (int32 `limit`,
# repeated string `streams` with the stream method names).
_STREAMLIMIT = _descriptor.Descriptor(
  name='StreamLimit',
  full_name='tinkoff.public.invest.api.contract.v1.StreamLimit',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='limit', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.limit', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='streams', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.streams', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1216,
  serialized_end=1261,
)
# Descriptor for GetInfoRequest: empty request message.
_GETINFOREQUEST = _descriptor.Descriptor(
  name='GetInfoRequest',
  full_name='tinkoff.public.invest.api.contract.v1.GetInfoRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1263,
  serialized_end=1279,
)
# Descriptor for GetInfoResponse: two bool status flags plus a repeated string
# of instrument categories the user is qualified for.
_GETINFORESPONSE = _descriptor.Descriptor(
  name='GetInfoResponse',
  full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='prem_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.prem_status', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='qual_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qual_status', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='qualified_for_work_with', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qualified_for_work_with', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1281,
  serialized_end=1373,
)
# --- protoc-generated cross-links: resolve message/enum field types that could
# not be set while the Descriptors above were being constructed. ---
_GETACCOUNTSRESPONSE.fields_by_name['accounts'].message_type = _ACCOUNT
_ACCOUNT.fields_by_name['type'].enum_type = _ACCOUNTTYPE
_ACCOUNT.fields_by_name['status'].enum_type = _ACCOUNTSTATUS
_ACCOUNT.fields_by_name['opened_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNT.fields_by_name['closed_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['liquid_portfolio'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['starting_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['minimal_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['funds_sufficiency_level'].message_type = common__pb2._QUOTATION
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['amount_of_missing_funds'].message_type = common__pb2._MONEYVALUE
_GETUSERTARIFFRESPONSE.fields_by_name['unary_limits'].message_type = _UNARYLIMIT
_GETUSERTARIFFRESPONSE.fields_by_name['stream_limits'].message_type = _STREAMLIMIT
# Register every message and enum descriptor on the file descriptor, then
# register the file with the symbol database.
DESCRIPTOR.message_types_by_name['GetAccountsRequest'] = _GETACCOUNTSREQUEST
DESCRIPTOR.message_types_by_name['GetAccountsResponse'] = _GETACCOUNTSRESPONSE
DESCRIPTOR.message_types_by_name['Account'] = _ACCOUNT
DESCRIPTOR.message_types_by_name['GetMarginAttributesRequest'] = _GETMARGINATTRIBUTESREQUEST
DESCRIPTOR.message_types_by_name['GetMarginAttributesResponse'] = _GETMARGINATTRIBUTESRESPONSE
DESCRIPTOR.message_types_by_name['GetUserTariffRequest'] = _GETUSERTARIFFREQUEST
DESCRIPTOR.message_types_by_name['GetUserTariffResponse'] = _GETUSERTARIFFRESPONSE
DESCRIPTOR.message_types_by_name['UnaryLimit'] = _UNARYLIMIT
DESCRIPTOR.message_types_by_name['StreamLimit'] = _STREAMLIMIT
DESCRIPTOR.message_types_by_name['GetInfoRequest'] = _GETINFOREQUEST
DESCRIPTOR.message_types_by_name['GetInfoResponse'] = _GETINFORESPONSE
DESCRIPTOR.enum_types_by_name['AccountType'] = _ACCOUNTTYPE
DESCRIPTOR.enum_types_by_name['AccountStatus'] = _ACCOUNTSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- protoc-generated concrete message classes: each Descriptor above is
# turned into a usable Message subclass and registered with the symbol DB. ---
GetAccountsRequest = _reflection.GeneratedProtocolMessageType('GetAccountsRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETACCOUNTSREQUEST,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsRequest)
  })
_sym_db.RegisterMessage(GetAccountsRequest)
GetAccountsResponse = _reflection.GeneratedProtocolMessageType('GetAccountsResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETACCOUNTSRESPONSE,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsResponse)
  })
_sym_db.RegisterMessage(GetAccountsResponse)
Account = _reflection.GeneratedProtocolMessageType('Account', (_message.Message,), {
  'DESCRIPTOR' : _ACCOUNT,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Account)
  })
_sym_db.RegisterMessage(Account)
GetMarginAttributesRequest = _reflection.GeneratedProtocolMessageType('GetMarginAttributesRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETMARGINATTRIBUTESREQUEST,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest)
  })
_sym_db.RegisterMessage(GetMarginAttributesRequest)
GetMarginAttributesResponse = _reflection.GeneratedProtocolMessageType('GetMarginAttributesResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETMARGINATTRIBUTESRESPONSE,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse)
  })
_sym_db.RegisterMessage(GetMarginAttributesResponse)
GetUserTariffRequest = _reflection.GeneratedProtocolMessageType('GetUserTariffRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETUSERTARIFFREQUEST,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffRequest)
  })
_sym_db.RegisterMessage(GetUserTariffRequest)
GetUserTariffResponse = _reflection.GeneratedProtocolMessageType('GetUserTariffResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETUSERTARIFFRESPONSE,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffResponse)
  })
_sym_db.RegisterMessage(GetUserTariffResponse)
UnaryLimit = _reflection.GeneratedProtocolMessageType('UnaryLimit', (_message.Message,), {
  'DESCRIPTOR' : _UNARYLIMIT,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.UnaryLimit)
  })
_sym_db.RegisterMessage(UnaryLimit)
StreamLimit = _reflection.GeneratedProtocolMessageType('StreamLimit', (_message.Message,), {
  'DESCRIPTOR' : _STREAMLIMIT,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.StreamLimit)
  })
_sym_db.RegisterMessage(StreamLimit)
GetInfoRequest = _reflection.GeneratedProtocolMessageType('GetInfoRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETINFOREQUEST,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoRequest)
  })
_sym_db.RegisterMessage(GetInfoRequest)
GetInfoResponse = _reflection.GeneratedProtocolMessageType('GetInfoResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETINFORESPONSE,
  '__module__' : 'users_pb2'
  # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoResponse)
  })
_sym_db.RegisterMessage(GetInfoResponse)
DESCRIPTOR._options = None
# --- protoc-generated service descriptor for UsersService: four unary RPCs
# (GetAccounts, GetMarginAttributes, GetUserTariff, GetInfo). ---
_USERSSERVICE = _descriptor.ServiceDescriptor(
  name='UsersService',
  full_name='tinkoff.public.invest.api.contract.v1.UsersService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=1632,
  serialized_end=2203,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetAccounts',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetAccounts',
    index=0,
    containing_service=None,
    input_type=_GETACCOUNTSREQUEST,
    output_type=_GETACCOUNTSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetMarginAttributes',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetMarginAttributes',
    index=1,
    containing_service=None,
    input_type=_GETMARGINATTRIBUTESREQUEST,
    output_type=_GETMARGINATTRIBUTESRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetUserTariff',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetUserTariff',
    index=2,
    containing_service=None,
    input_type=_GETUSERTARIFFREQUEST,
    output_type=_GETUSERTARIFFRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetInfo',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetInfo',
    index=3,
    containing_service=None,
    input_type=_GETINFOREQUEST,
    output_type=_GETINFORESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_USERSSERVICE)
DESCRIPTOR.services_by_name['UsersService'] = _USERSSERVICE
# @@protoc_insertion_point(module_scope)
| include/users_pb2.py | 29,770 | Generated protocol buffer code.
-*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: users.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsRequest) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsResponse) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Account) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffRequest) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffResponse) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.UnaryLimit) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.StreamLimit) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoRequest) @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoResponse) @@protoc_insertion_point(module_scope) | 1,240 | en | 0.337214 |
from loris.compliance.format import FormatCompliance
from loris.compliance.helpers import ComparableMixin
from loris.compliance.helpers import st
from loris.compliance.http import HttpCompliance
from loris.compliance.quality import QualityCompliance
from loris.compliance.region import RegionCompliance
from loris.compliance.rotation import RotationCompliance
from loris.compliance.size import SizeCompliance
from loris.constants import KEYWORD_MAX_AREA
from loris.constants import KEYWORD_MAX_HEIGHT
from loris.constants import KEYWORD_MAX_WIDTH
from loris.constants import QUALITY_COLOR
class Compliance(ComparableMixin):
    """Aggregated IIIF Image API compliance for a server configuration.

    Wraps the per-category compliance objects (format, http, quality, region,
    rotation, size). The overall level, ``int(self)``, is the *minimum* of the
    per-category levels, since a level is only met when every category meets it.
    """

    # Union of the features every category requires at level 1 / level 2.
    ALL_LEVEL_1 = st(
        HttpCompliance.LEVEL_1
        + QualityCompliance.LEVEL_1
        + RegionCompliance.LEVEL_1
        + RotationCompliance.LEVEL_1
        + SizeCompliance.LEVEL_1
    )
    ALL_LEVEL_2 = st(
        FormatCompliance.LEVEL_2
        + HttpCompliance.LEVEL_2
        + QualityCompliance.LEVEL_2
        + RegionCompliance.LEVEL_2
        + RotationCompliance.LEVEL_2
        + SizeCompliance.LEVEL_2
    )

    def __init__(self, config):
        """*config* is a mapping with per-category sub-configs under the keys
        ``formats``, ``http``, ``quality``, ``region``, ``rotation``, ``size``."""
        self.format = FormatCompliance(config["formats"])
        self.http = HttpCompliance(config["http"])
        self.quality = QualityCompliance(config["quality"])
        self.region = RegionCompliance(config["region"])
        self.rotation = RotationCompliance(config["rotation"])
        self.size = SizeCompliance(config["size"])
        # Lazily computed caches (filled on first access):
        self._extra_features = None
        self._int = None
        self._uri = None

    # make it possible to do int(self), and do comparisons
    def __int__(self):
        if self._int is None:
            ints = map(
                int, (self.format, self.http, self.quality, self.region, self.rotation, self.size)
            )
            self._int = min(ints)
        return self._int

    def __str__(self):
        return f"level{int(self)}"

    @property
    def uri(self):
        """Compliance-level URI for use in the info.json ``profile``."""
        if self._uri is None:
            self._uri = f"http://iiif.io/api/image/3/level{int(self)}.json"
        return self._uri

    @property
    def all_enabled_features(self):
        # Note that formats and qualities aren't 'features' and are always
        # listed explicitly in the profile (other that jpg and default)
        return st(
            self.http.features + self.region.features + self.rotation.features + self.size.features
        )

    def extra_qualities(self, include_color=True):
        """Enabled qualities, optionally excluding 'color'."""
        qualities = self.quality.features
        if not include_color:
            # Comprehension instead of filter(lambda ...) — same result.
            qualities = tuple(q for q in qualities if q != QUALITY_COLOR)
        return qualities

    @property
    def extra_formats(self):
        return self.format.features

    @property
    def extra_features(self):
        # Features supported above the calculated compliance level, i.e. the
        # difference between all enabled features and the calculated compliance
        # level. For listing in profile[1]['supports'].
        if self._extra_features is None:
            level_features = set()  # level 0 requires no features
            if int(self) == 2:
                level_features = set(Compliance.ALL_LEVEL_2)
            elif int(self) == 1:
                level_features = set(Compliance.ALL_LEVEL_1)
            self._extra_features = set(self.all_enabled_features) - level_features
        return st(self._extra_features)
| loris/compliance/__init__.py | 3,340 | make it possible to do int(self), and do comparisons Note that formats and qualities aren't 'features' and are always listed explicitly in the profile (other that jpg and default) Features supported above the calculated compliance level, i.e. the difference between all enabled features and the calculated compliance level. For listing in profile[1]['supports']. 0 | 364 | en | 0.918309 |
from typing import IO, Dict, Optional, Set
from rdflib.plugins.serializers.xmlwriter import XMLWriter
from rdflib.namespace import Namespace, RDF, RDFS # , split_uri
from rdflib.plugins.parsers.RDFVOC import RDFVOC
from rdflib.graph import Graph
from rdflib.term import Identifier, URIRef, Literal, BNode
from rdflib.util import first, more_than
from rdflib.collection import Collection
from rdflib.serializer import Serializer
from xml.sax.saxutils import quoteattr, escape
import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
    """Plain (non-pretty) RDF/XML serializer: emits one rdf:Description element
    per subject, with no nesting of resources inside other resources."""
    def __init__(self, store: Graph):
        super(XMLSerializer, self).__init__(store)
    def __bindings(self):
        """Yield (prefix, namespace) pairs for every predicate namespace in the
        store, guaranteeing 'rdf' is bound to the RDF namespace."""
        store = self.store
        nm = store.namespace_manager
        bindings = {}
        for predicate in set(store.predicates()):
            prefix, namespace, name = nm.compute_qname_strict(predicate)
            bindings[prefix] = URIRef(namespace)
        RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        if "rdf" in bindings:
            assert bindings["rdf"] == RDFNS
        else:
            bindings["rdf"] = RDFNS
        for prefix, namespace in bindings.items():
            yield prefix, namespace
    def serialize(
        self,
        stream: IO[bytes],
        base: Optional[str] = None,
        encoding: Optional[str] = None,
        **args,
    ):
        """Write the whole store to *stream* as flat RDF/XML.

        NOTE(review): the *encoding* parameter is immediately overwritten by
        ``encoding = self.encoding`` below, so only the serializer's own
        encoding is ever used — confirm whether that is intended.
        """
        # if base is given here, use that, if not and a base is set for the graph use that
        if base is not None:
            self.base = base
        elif self.store.base is not None:
            self.base = self.store.base
        self.__stream = stream
        self.__serialized: Dict[Identifier, int] = {}
        encoding = self.encoding
        self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
        # startDocument
        write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
        # startRDF
        write("<rdf:RDF\n")
        # If provided, write xml:base attribute for the RDF
        if "xml_base" in args:
            write(' xml:base="%s"\n' % args["xml_base"])
        elif self.base:
            write(' xml:base="%s"\n' % self.base)
        # TODO:
        # assert(
        # namespaces["http://www.w3.org/1999/02/22-rdf-syntax-ns#"]=='rdf')
        bindings = list(self.__bindings())
        bindings.sort()
        for prefix, namespace in bindings:
            if prefix:
                write(' xmlns:%s="%s"\n' % (prefix, namespace))
            else:
                write(' xmlns="%s"\n' % namespace)
        write(">\n")
        # write out triples by subject
        for subject in self.store.subjects():
            self.subject(subject, 1)
        # endRDF
        write("</rdf:RDF>\n")
        # Set to None so that the memory can get garbage collected.
        # self.__serialized = None
        del self.__serialized
    def subject(self, subject, depth=1):
        """Emit one rdf:Description element for *subject* (at most once per
        subject; Literals as subjects are silently skipped)."""
        if subject not in self.__serialized:
            self.__serialized[subject] = 1
            if isinstance(subject, (BNode, URIRef)):
                write = self.write
                indent = " " * depth
                element_name = "rdf:Description"
                if isinstance(subject, BNode):
                    write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
                else:
                    uri = quoteattr(self.relativize(subject))
                    write("%s<%s rdf:about=%s" % (indent, element_name, uri))
                # Self-close the element when the subject has no statements.
                if (subject, None, None) in self.store:
                    write(">\n")
                    for predicate, object in self.store.predicate_objects(subject):
                        self.predicate(predicate, object, depth + 1)
                    write("%s</%s>\n" % (indent, element_name))
                else:
                    write("/>\n")
    def predicate(self, predicate, object, depth=1):
        """Emit one property element for the (*predicate*, *object*) pair:
        literal content, rdf:nodeID for BNodes, or rdf:resource otherwise."""
        write = self.write
        indent = " " * depth
        qname = self.store.namespace_manager.qname_strict(predicate)
        if isinstance(object, Literal):
            attributes = ""
            if object.language:
                attributes += ' xml:lang="%s"' % object.language
            if object.datatype:
                attributes += ' rdf:datatype="%s"' % object.datatype
            write(
                "%s<%s%s>%s</%s>\n"
                % (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
            )
        else:
            if isinstance(object, BNode):
                write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
            else:
                write(
                    "%s<%s rdf:resource=%s/>\n"
                    % (indent, qname, quoteattr(self.relativize(object)))
                )
# Fully-qualified attribute names in the XML namespace; the XML 1998 namespace
# URI has no trailing separator, so the local names 'lang'/'base' are appended
# directly to it.
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
# OWL vocabulary namespace (used to recognise owl:Class typed resources).
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
def fix(val):
    """Strip a leading '_:' BNode marker from *val*; such prefixes are not
    valid NCNames, so they must not appear in rdf:nodeID attributes."""
    return val[2:] if val.startswith("_:") else val
class PrettyXMLSerializer(Serializer):
    """RDF/XML serializer that nests (inlines) referenced resources up to
    *max_depth*, types elements by rdf:type where possible, and serializes
    RDF collections with rdf:parseType="Collection"."""

    def __init__(self, store: Graph, max_depth=3):
        super(PrettyXMLSerializer, self).__init__(store)
        # URIRefs that must be written as a bare rdf:Description reference
        # (used for collection members that are serialized out-of-line).
        self.forceRDFAbout: Set[URIRef] = set()

    def serialize(
        self,
        stream: IO[bytes],
        base: Optional[str] = None,
        encoding: Optional[str] = None,
        **args,
    ):
        """Write the store to *stream* as pretty (nested) RDF/XML."""
        self.__serialized: Dict[Identifier, int] = {}
        store = self.store
        # if base is given here, use that, if not and a base is set for the graph use that
        if base is not None:
            self.base = base
        elif store.base is not None:
            self.base = store.base
        self.max_depth = args.get("max_depth", 3)
        assert self.max_depth > 0, "max_depth must be greater than 0"
        self.nm = nm = store.namespace_manager
        self.writer = writer = XMLWriter(stream, nm, encoding)
        namespaces = {}
        # Collect namespaces for all predicates plus all rdf:type objects
        # (type URIs become element names, so they need prefixes too).
        possible = set(store.predicates()).union(store.objects(None, RDF.type))
        for predicate in possible:
            prefix, namespace, local = nm.compute_qname_strict(predicate)
            namespaces[prefix] = namespace
        namespaces["rdf"] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
        writer.push(RDFVOC.RDF)
        if "xml_base" in args:
            writer.attribute(XMLBASE, args["xml_base"])
        elif self.base:
            writer.attribute(XMLBASE, self.base)
        writer.namespaces(namespaces.items())
        subject: Identifier
        # Write out subjects that can not be inline
        for subject in store.subjects():  # type: ignore[assignment]
            if (None, None, subject) in store:
                if (subject, None, subject) in store:
                    self.subject(subject, 1)
            else:
                self.subject(subject, 1)
        # write out anything that has not yet been reached
        # write out BNodes last (to ensure they can be inlined where possible)
        bnodes = set()
        for subject in store.subjects():  # type: ignore[assignment]
            if isinstance(subject, BNode):
                bnodes.add(subject)
                continue
            self.subject(subject, 1)
        # now serialize only those BNodes that have not been serialized yet
        for bnode in bnodes:
            if bnode not in self.__serialized:
                # BUGFIX: serialize the bnode itself. The original passed the
                # stale loop variable `subject` from the previous loop, so
                # unreached BNodes were never written (the last non-BNode
                # subject was re-attempted instead).
                self.subject(bnode, 1)
        writer.pop(RDFVOC.RDF)
        stream.write("\n".encode("latin-1"))
        # Set to None so that the memory can get garbage collected.
        self.__serialized = None  # type: ignore[assignment]

    def subject(self, subject: Identifier, depth: int = 1):
        """Emit *subject* as an element, typed by its rdf:type when the type
        has a valid qname; each subject is written at most once."""
        store = self.store
        writer = self.writer
        if subject in self.forceRDFAbout:
            writer.push(RDFVOC.Description)
            writer.attribute(RDFVOC.about, self.relativize(subject))
            writer.pop(RDFVOC.Description)
            self.forceRDFAbout.remove(subject)  # type: ignore[arg-type]
        elif subject not in self.__serialized:
            self.__serialized[subject] = 1
            type = first(store.objects(subject, RDF.type))
            try:
                self.nm.qname(type)
            except Exception:  # narrowed from a bare except: same effect here
                type = None
            element = type or RDFVOC.Description
            writer.push(element)
            if isinstance(subject, BNode):

                def subj_as_obj_more_than(ceil):
                    return True
                    # more_than(store.triples((None, None, subject)), ceil)

                # here we only include BNode labels if they are referenced
                # more than once (this reduces the use of redundant BNode
                # identifiers)
                if subj_as_obj_more_than(1):
                    writer.attribute(RDFVOC.nodeID, fix(subject))
            else:
                writer.attribute(RDFVOC.about, self.relativize(subject))
            if (subject, None, None) in store:
                for predicate, object in store.predicate_objects(subject):
                    # The typing triple is already expressed by the element name.
                    if not (predicate == RDF.type and object == type):
                        self.predicate(predicate, object, depth + 1)
            writer.pop(element)
        elif subject in self.forceRDFAbout:
            # TODO FIXME?: this looks like a duplicate of first condition
            writer.push(RDFVOC.Description)
            writer.attribute(RDFVOC.about, self.relativize(subject))
            writer.pop(RDFVOC.Description)
            self.forceRDFAbout.remove(subject)  # type: ignore[arg-type]

    def predicate(self, predicate, object, depth=1):
        """Emit one property element: literal content, a reference, an inlined
        resource (up to max_depth), or an RDF collection."""
        writer = self.writer
        store = self.store
        writer.push(predicate)
        if isinstance(object, Literal):
            if object.language:
                writer.attribute(XMLLANG, object.language)
            if object.datatype == RDF.XMLLiteral and isinstance(
                object.value, xml.dom.minidom.Document
            ):
                writer.attribute(RDFVOC.parseType, "Literal")
                writer.text("")
                writer.stream.write(object)
            else:
                if object.datatype:
                    writer.attribute(RDFVOC.datatype, object.datatype)
                writer.text(object)
        elif object in self.__serialized or not (object, None, None) in store:
            # Already written (or has no statements): emit a reference only.
            if isinstance(object, BNode):
                if more_than(store.triples((None, None, object)), 0):
                    writer.attribute(RDFVOC.nodeID, fix(object))
            else:
                writer.attribute(RDFVOC.resource, self.relativize(object))
        else:
            if first(store.objects(object, RDF.first)):  # may not have type
                # RDF.List
                self.__serialized[object] = 1
                # Warn that any assertions on object other than
                # RDF.first and RDF.rest are ignored... including RDF.List
                import warnings

                warnings.warn(
                    "Assertions on %s other than RDF.first " % repr(object)
                    + "and RDF.rest are ignored ... including RDF.List",
                    UserWarning,
                    stacklevel=2,
                )
                writer.attribute(RDFVOC.parseType, "Collection")
                col = Collection(store, object)
                for item in col:
                    if isinstance(item, URIRef):
                        self.forceRDFAbout.add(item)
                    self.subject(item)
                    if not isinstance(item, URIRef):
                        self.__serialized[item] = 1
            else:
                if first(
                    store.triples_choices(
                        (object, RDF.type, [OWL_NS.Class, RDFS.Class])
                    )
                ) and isinstance(object, URIRef):
                    # Classes are referenced, never inlined.
                    writer.attribute(RDFVOC.resource, self.relativize(object))
                elif depth <= self.max_depth:
                    self.subject(object, depth + 1)
                elif isinstance(object, BNode):
                    if (
                        object not in self.__serialized
                        and (object, None, None) in store
                        and len(list(store.subjects(object=object))) == 1
                    ):
                        # inline blank nodes if they haven't been serialized yet
                        # and are only referenced once (regardless of depth)
                        self.subject(object, depth + 1)
                    else:
                        writer.attribute(RDFVOC.nodeID, fix(object))
                else:
                    writer.attribute(RDFVOC.resource, self.relativize(object))
        writer.pop(predicate)
| rdflib/plugins/serializers/rdfxml.py | 12,931 | strip off _: from nodeIDs... as they are not valid NCNames
, split_uri if base is given here, use that, if not and a base is set for the graph use that startDocument startRDF If provided, write xml:base attribute for the RDF TODO: assert( namespaces["http://www.w3.org/1999/02/22-rdf-syntax-ns"]=='rdf') write out triples by subject endRDF Set to None so that the memory can get garbage collected. self.__serialized = None TODO: if base is given here, use that, if not and a base is set for the graph use that Write out subjects that can not be inline type: ignore[assignment] write out anything that has not yet been reached write out BNodes last (to ensure they can be inlined where possible) type: ignore[assignment] now serialize only those BNodes that have not been serialized yet Set to None so that the memory can get garbage collected. type: ignore[assignment] type: ignore[arg-type] more_than(store.triples((None, None, subject)), ceil) here we only include BNode labels if they are referenced more than once (this reduces the use of redundant BNode identifiers) TODO FIXME?: this looks like a duplicate of first condition type: ignore[arg-type] may not have type RDF.List Warn that any assertions on object other than RDF.first and RDF.rest are ignored... including RDF.List inline blank nodes if they haven't been serialized yet and are only referenced once (regardless of depth) | 1,395 | en | 0.849283 |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import os
from os.path import abspath, dirname
from restclients_core.dao import DAO
class Sdbmyuw_DAO(DAO):
    """restclients_core DAO for the 'sdbmyuw' service."""
    def service_name(self):
        """Return the service key used by restclients_core configuration."""
        return 'sdbmyuw'
    def service_mock_paths(self):
        """Return the directories searched for mock resources in MOCK mode."""
        return [abspath(os.path.join(dirname(__file__), "resources"))]
| uw_sdbmyuw/dao.py | 357 | Copyright 2021 UW-IT, University of Washington SPDX-License-Identifier: Apache-2.0 | 82 | en | 0.31374 |
import pandas as pd
import read_mta_turnstile as t
# This function generally generates a schedule for all stations in the df_top.csv file in a pivot table format.
def find_schedule():
    """Build a volunteer schedule for the top-Toucan-score stations.

    Reads 'df_top.csv' (top stations) and May-2018 MTA turnstile data, then
    for each top station picks the three weekday/shift slots (Morning /
    Afternoon / Evening) with the highest mean hourly entries.

    Returns a pivot table indexed by STATION with (day_name, shift) columns;
    a cell is 1 where volunteers should be present, NaN otherwise.
    """
    # Read the stations with highest Toucan scores and keep only the columns
    # the schedule algorithm needs.
    top_stations = pd.read_csv('df_top.csv')
    top_stations.rename(columns={'name': 'STATION'}, inplace=True)
    top_stations1 = top_stations.loc[:, ['STATION', 'toucan_score']]
    # Read the turnstile data and keep the relevant columns. (A merge with the
    # top-station list previously happened here but its result was never used,
    # so it has been removed.)
    turnstile_data = t.read_mta_turnstile(start='20180501', end='20180531')
    turnstile_data1 = turnstile_data.loc[:, ['STATION', 'DATE', 'TIME', 'hourly_entries', 'hourly_exits']]
    # Aggregate hourly entries of each station by date/time and derive
    # "day of week" and "hour of day".
    agg = turnstile_data1.groupby(['STATION', 'DATE', 'TIME'])[['hourly_entries']].sum().reset_index()
    agg.DATE = pd.to_datetime(agg.DATE, format='%m/%d/%Y')
    agg.TIME = pd.to_datetime(agg.TIME, format='%H:%M:%S')
    agg['day_name'] = agg.DATE.dt.day_name()
    agg['hour_of_day'] = agg.TIME.dt.hour
    # Keep only readings between 06:00 and 18:00 (exclusive bounds 5 and 19).
    agg = agg[(agg['hour_of_day'] > 5) & (agg['hour_of_day'] < 19)]
    # Segment the remaining hours into three shifts.
    shifts = []
    for h in agg.hour_of_day:
        if int(h) <= 11:
            shifts.append('Morning')
        elif int(h) >= 15:
            shifts.append('Evening')
        else:
            shifts.append('Afternoon')
    agg.hour_of_day = shifts
    # For each top station, find the three (shift, day) slots with the highest
    # mean hourly entries. Collect per-station frames and concatenate once:
    # DataFrame.append was deprecated and removed in pandas 2.0.
    per_station = []
    for station_name in top_stations1.STATION.unique():
        hm = agg.loc[agg.STATION == station_name, ['hour_of_day', 'day_name', 'hourly_entries']]
        hm = hm.groupby(['hour_of_day', 'day_name'])['hourly_entries'].mean().reset_index()
        hm = hm.pivot(index='hour_of_day', columns='day_name', values='hourly_entries')
        sc = hm.stack().nlargest(3).reset_index()
        sc.rename(columns={0: 'hourly_entries'}, inplace=True)
        sc['STATION'] = [station_name] * 3
        per_station.append(sc)
    if per_station:
        schedule = pd.concat(per_station, ignore_index=True)
    else:
        schedule = pd.DataFrame(columns=['STATION', 'hour_of_day', 'day_name', 'hourly_entries'])
    # Pivot the schedule: one row per station, one column per (day, shift).
    schedule['p'] = [1] * schedule.shape[0]
    schedule_pivot = schedule.pivot_table(index=['STATION'], columns=['day_name', 'hour_of_day'], values='p')
    return schedule_pivot
import _continuation
import threading
# Public API of this module.
__all__ = ['Fiber', 'error', 'current']
# Per-thread storage holding the currently running fiber and the thread's
# implicit main fiber (created lazily by current()).
_tls = threading.local()
def current():
    """Return the fiber running on the calling thread, lazily creating the
    thread's main fiber on first use."""
    if not hasattr(_tls, 'current_fiber'):
        _tls.current_fiber = _tls.main_fiber = _create_main_fiber()
    return _tls.current_fiber
class error(Exception):
    """Raised for fiber misuse: dead fibers, cross-thread switches, bad parents."""
    pass
class Fiber(object):
    """A cooperatively scheduled unit of execution backed by a PyPy continulet.

    A fiber runs ``target(*args, **kwargs)`` when first switched to; when the
    target returns, control is handed to the nearest live ancestor. Fibers are
    bound to the thread that created them and cannot be switched to from
    another thread.
    """

    # Class-level defaults, overwritten per instance as the fiber starts/ends.
    _cont = None        # underlying _continuation.continulet (None until started)
    _thread_id = None   # ident of the owning thread
    _ended = False      # True once the target returned or an unstarted fiber was thrown into

    def __init__(self, target=None, args=None, kwargs=None, parent=None):
        """Create a fiber for *target*; *parent* defaults to the current fiber.

        Raises ``error`` if *parent* lives on a different thread.
        """
        # Normalize here instead of using mutable default arguments
        # (args=[], kwargs={}), which would be shared across instances.
        args = [] if args is None else args
        kwargs = {} if kwargs is None else kwargs

        def _run(c):
            # Entry point executed inside the continulet.
            _tls.current_fiber = self
            try:
                return target(*args, **kwargs)
            finally:
                # Mark this fiber dead, then permute control to the closest
                # ancestor that is still runnable.
                cont = self._cont
                self._cont = None
                self._ended = True
                _continuation.permute(cont, self._get_active_parent()._cont)

        self._func = _run
        if parent is None:
            parent = current()
        self._thread_id = threading.current_thread().ident
        if self._thread_id != parent._thread_id:
            raise error('parent cannot be on a different thread')
        self.parent = parent

    def _get_active_parent(self):
        """Return the nearest ancestor that has started and not yet ended."""
        parent = self.parent
        while True:
            if parent is not None and parent._cont is not None and not parent._ended:
                break
            parent = parent.parent
        return parent

    @classmethod
    def current(cls):
        """Return the fiber currently running on this thread."""
        return current()

    @property
    def parent(self):
        """Fiber that receives control when this one finishes (None for main)."""
        return self.__dict__.get('parent', None)

    @parent.setter
    def parent(self, value):
        if not isinstance(value, Fiber):
            raise TypeError('parent must be a Fiber')
        if value._ended:
            raise ValueError('parent must not have ended')
        if self._thread_id != value._thread_id:
            raise ValueError('parent cannot be on a different thread')
        self.__dict__['parent'] = value

    def switch(self, value=None):
        """Transfer control to this fiber; *value* becomes the return value of
        the switch() call that suspended it."""
        if self._ended:
            raise error('Fiber has ended')
        curr = current()
        if curr._thread_id != self._thread_id:
            raise error('Cannot switch to a fiber on a different thread')
        if self._cont is None:
            # First switch: lazily create the continulet.
            self._cont = _continuation.continulet(self._func)
        try:
            return curr._cont.switch(value=value, to=self._cont)
        finally:
            # We are running again: restore the thread-local current fiber.
            _tls.current_fiber = curr

    def throw(self, *args):
        """Raise an exception inside this fiber (args as for ``raise``)."""
        if self._ended:
            raise error('Fiber has ended')
        curr = current()
        if curr._thread_id != self._thread_id:
            raise error('Cannot switch to a fiber on a different thread')
        if self._cont is None:
            # Fiber was not started yet, propagate to parent directly
            self._ended = True
            return self._get_active_parent().throw(*args)
        try:
            return curr._cont.throw(*args, to=self._cont)
        finally:
            _tls.current_fiber = curr

    def is_alive(self):
        """True if the fiber is running/suspended, or has not started yet."""
        return (self._cont is not None and self._cont.is_pending()) or \
               (self._cont is None and not self._ended)

    def __getstate__(self):
        # Fibers capture live execution state and cannot be pickled.
        raise TypeError('cannot serialize Fiber object')
def _create_main_fiber():
    """Build the implicit root fiber representing the thread's original
    execution context (no target, no parent, already 'started')."""
    fiber = Fiber.__new__(Fiber)
    fiber._cont = _continuation.continulet.__new__(_continuation.continulet)
    fiber._ended = False
    fiber._thread_id = threading.current_thread().ident
    # Bypass the parent property: the root fiber legitimately has no parent.
    fiber.__dict__['parent'] = None
    return fiber
| fibers/_pyfibers.py | 3,459 | Fiber was not started yet, propagate to parent directly | 55 | en | 0.989674 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
CASCADE_PLUGINS = getattr(settings, 'SHOP_CASCADE_PLUGINS',
('auth', 'breadcrumb', 'catalog', 'cart', 'checkout', 'extensions', 'order', 'processbar', 'search',))
def set_defaults(config):
    """Install default Cascade extra-field settings for the shop button
    plugins, without overriding anything already configured in *config*."""
    config.setdefault('plugins_with_extra_fields', {})
    extra_fields = config['plugins_with_extra_fields']
    # Both buttons get the same margin controls; build a fresh config object
    # per plugin so they are not shared.
    for plugin_name in ('ShopReorderButtonPlugin', 'ShopCancelOrderButtonPlugin'):
        extra_fields.setdefault(plugin_name, PluginExtraFieldsConfig(
            inline_styles={
                'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
                'extra_units:Margins': 'px,em'
            },
        ))
| shop/cascade/settings.py | 1,004 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import sklearn.neighbors
from numpy import linalg as LA
from apexpy import Apex
import numpy as np
# Create an Apex conversion instance at the usual reference altitude.
# No epoch is specified here; the epoch is set just-in-time (see
# update_apex_epoch) right before a coordinate transformation is done.
apex_reference_height = 110000.  # Apex reference height in meters
# refh is divided by 1000 — presumably Apex expects kilometers; confirm
# against the apexpy documentation.
module_Apex = Apex(refh=apex_reference_height/1000.)
def update_apex_epoch(dt):
    """Set the epoch of the module-level Apex instance from a datetime.

    The decimal epoch is year + day_of_year / days_in_year.

    dt : datetime whose year and day-of-year define the epoch.
    """
    import calendar
    year = dt.year
    doy = dt.timetuple().tm_yday
    # Use the full Gregorian leap-year rule. The previous `year % 4 == 0`
    # test misclassified century years such as 1900 and 2100.
    days_in_year = 366. if calendar.isleap(year) else 365.
    epoch = year + doy / days_in_year
    print('Setting Apex epoch for {} to {}'.format(dt.strftime('%Y%m%d'), epoch))
    module_Apex.set_epoch(epoch)
def dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k = 5, tol = 1.5):
    """
    Spatially interpolate map observations onto the DMSP track using the k
    nearest neighbors under the haversine (great-circle) metric. `tol` is
    a radius in degrees, converted to radians below.

    NOTE(review): an identical function of the same name is re-defined
    later in this module and shadows this one at import time.
    """
    tol = np.deg2rad(tol)
    # reshape to N by 2 array where each row is (lat, lon), in radians as
    # required by the haversine metric
    dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape(-1,1),lon_dmsp.flatten().reshape(-1,1))))
    map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape(-1,1), lon_map.flatten().reshape(-1,1))))
    N_points = dmsp_points.shape[0]
    obs_val = Obs_map.flatten()
    model = sklearn.neighbors.NearestNeighbors(n_neighbors = k, radius = tol, metric = 'haversine')
    model.fit(map_points)
    neighbors = model.kneighbors(dmsp_points, return_distance = True)
    # neighbors is (distances, indices), each of shape (N_points, k)
    obs_interp = np.empty(N_points)
    for i in range(N_points):
        distances = neighbors[0][i]
        inds = neighbors[1][i]
        # NOTE(review): weights grow with distance, so FARTHER neighbors
        # count more; inverse-distance weighting may have been intended —
        # confirm before relying on this smoothing.
        weights = distances/np.nansum(distances)
        obs_interp[i] = np.nansum( obs_val[inds] * weights)
    return obs_interp
def latlt2polar(lat, lt, hemisphere):
    """Convert latitude (degrees) and local time (hours) to polar
    coordinates for a top-down dial plot.

    Returns (r, theta): r is the colatitude referenced to the pole of the
    requested hemisphere ('N' or 'S'), theta is local time expressed as an
    azimuthal angle in radians.
    """
    from numpy import pi
    if hemisphere == 'N':
        signed_lat = lat
    elif hemisphere == 'S':
        # Reference from the south pole: flip the latitude sign first.
        signed_lat = -1 * lat
    else:
        raise ValueError('%s is not a valid hemisphere, N or S, please!' % (hemisphere))
    r = 90. - signed_lat
    # 24 hours sweep the full circle. theta=0 lands on the negative y-axis
    # (local-time convention) until the dial plot rotates the zero location
    # to the traditional polar orientation.
    theta = lt / 24. * 2 * pi
    return r, theta
def polar2dial(ax):
    """Turn a matplotlib polar axes into a dial plot.

    Rotates the plot so noon sits at the top and midnight at the bottom,
    labels the azimuthal grid in local-time hours and the radial grid in
    degrees latitude, and clips the radius at 50 degrees latitude.
    """
    # Theta zero at 'S' puts 0 LT at the bottom, hence noon at the top.
    ax.set_theta_zero_location('S')
    hours = np.array([0., 3., 6., 9., 12., 15., 18., 21.])
    hour_angles = hours * 180. / 12
    hour_labels = ['%d:00' % (int(angle / 180. * 12)) for angle in hour_angles.flatten().tolist()]
    ax.set_thetagrids(hour_angles, labels=hour_labels)
    # Radial ticks are colatitudes; label them with the latitude instead.
    colat_ticks = 90. - np.array([80., 70., 60., 50., 40.])
    lat_labels = [r'$%d^{o}$' % (int(90. - colat)) for colat in colat_ticks.flatten().tolist()]
    ax.set_rgrids(colat_ticks, labels=lat_labels)
    ax.set_rlim([0., 40.])
def map_polar2cart(LAT,LON, hemi = 'N'):
    # Convert latitude and longitude grids (in degrees) to cartesian
    # coordinates for interpolation purposes.
    # NOTE(review): `satplottools` is not imported anywhere in this module,
    # so calling this raises NameError as written — confirm and add the
    # intended import.
    X_map, Y_map = satplottools.latlon2cart(LAT.flatten(), LON.flatten(),hemi)
    return X_map, Y_map
def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance = 0.5):
    """
    Nearest-neighbor match of flattened map grid points onto DMSP track
    points.

    Returns (indices, mask): for every DMSP point, `indices` selects the
    nearest flattened map point, and `mask` is True where that neighbor is
    within `tolerance` of the DMSP point in both X and Y.
    """
    # scipy is never imported at module level in this file, so the bare
    # `scipy.interpolate` reference raised NameError; import it locally.
    import scipy.interpolate

    # indices of the map that fit the dmsp map
    indices = scipy.interpolate.griddata((X_map, Y_map), np.arange(len(X_map.flatten())), (X_dmsp, Y_dmsp), method = 'nearest')
    # get mask for map elements that are within distance tolerance
    mask = (abs(X_map[indices] - X_dmsp) < tolerance) & (abs(Y_map[indices] - Y_dmsp) < tolerance)
    return indices, mask
def greatCircleDist(location1, location2, lonorlt='lt'):
    """Return n angular distances in radians between n-by-2 arrays.

    location1, location2: rows of [lat (deg), azimuthal coordinate],
    compared row-wise (location1[0,:] vs location2[0,:]). The azimuthal
    column is local time in hours when lonorlt='lt', longitude in degrees
    otherwise. Points are assumed to lie on a common sphere (same altitude).
    """
    pi = np.pi
    azi2rad = pi/12. if lonorlt=='lt' else pi/180
    wrappt = 24. if lonorlt=='lt' else 360.
    # Wrap the azimuthal coordinate of location1 into range.
    # NOTE(review): location2 is never wrapped, and this indexing requires
    # location1 to be 2-D even though a 1-D branch exists below — confirm
    # intended call shapes.
    over = location1[:,1] > wrappt
    under = location1[:,1] < 0.
    location1[over,1]=location1[over,1]-wrappt
    location1[under,1]=location1[under,1]+wrappt
    if location1.ndim == 1 or location2.ndim == 1:
        dphi = abs(location2[1]-location1[1])*azi2rad
        a = (90-location1[0])/360*2*pi #get the colatitude in radians
        b = (90-location2[0])/360*2*pi
        C = np.pi - np.abs(dphi - np.pi) #angular separation in azimuth, radians
    else:
        dphi = abs(location2[:,1]-location1[:,1])*azi2rad
        a = (90-location1[:,0])/360*2*pi #get the colatitude in radians
        b = (90-location2[:,0])/360*2*pi
        C = np.pi - np.abs(dphi - np.pi) #angular separation in azimuth, radians
    # Spherical law of cosines. The original called bare arccos/cos/sin,
    # which are not defined in this module (NameError); qualify with np.
    return np.arccos(np.cos(a)*np.cos(b) + np.sin(a)*np.sin(b)*np.cos(C))
def myGreatCircleDistance(location1, location2):
    """Angular distance (radians) between two single (lat, lon) points.

    Thin wrapper around greatCircleDist: promotes each point to a 1-row
    array so the vectorized helper applies, using longitude semantics.
    """
    point_a = location1.reshape(1, 2)
    point_b = location2.reshape(1, 2)
    return greatCircleDist(point_a, point_b, lonorlt='lon')
def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):
    """
    Spatially interpolate map observations onto the DMSP track using the
    k nearest neighbors in cartesian coordinates (BallTree), setting NaN
    where the neighbor-distance norm exceeds `tol`.
    """
    # reshape to N by 2 array where each row is (X, Y)
    dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))
    map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))
    N_points = dmsp_points.shape[0]
    obs_val = Obs_map.flatten()
    model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )
    dists, inds = model.query(dmsp_points, k=k)
    obs_interp = np.empty(N_points)
    for i in range(N_points):
        # Norm over all k neighbor distances acts as a closeness gate.
        norm = LA.norm(dists[i])
        if (norm > tol):
            # Too far from any map sample: mark as missing.
            obs_interp[i] = np.nan
        else:
            # weights = dists[i]/norm
            # NOTE(review): weights are proportional to distance, so farther
            # neighbors count MORE; inverse-distance weighting may have been
            # intended — confirm before relying on this smoothing.
            weights = dists[i]/np.nansum(dists[i])
            obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )
    return obs_interp
def dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k = 5, tol = 1.5):
    """
    Spatially interpolate map observations onto the DMSP track using the k
    nearest neighbors under the haversine (great-circle) metric. `tol` is
    a radius in degrees, converted to radians below.

    NOTE(review): this is a byte-identical duplicate of the definition
    earlier in this module; being later, this one wins at import time.
    One of the two should be removed.
    """
    tol = np.deg2rad(tol)
    # reshape to N by 2 array where each row is (lat, lon), in radians as
    # required by the haversine metric
    dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape(-1,1),lon_dmsp.flatten().reshape(-1,1))))
    map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape(-1,1), lon_map.flatten().reshape(-1,1))))
    N_points = dmsp_points.shape[0]
    obs_val = Obs_map.flatten()
    model = sklearn.neighbors.NearestNeighbors(n_neighbors = k, radius = tol, metric = 'haversine')
    model.fit(map_points)
    neighbors = model.kneighbors(dmsp_points, return_distance = True)
    # neighbors is (distances, indices), each of shape (N_points, k)
    obs_interp = np.empty(N_points)
    for i in range(N_points):
        distances = neighbors[0][i]
        inds = neighbors[1][i]
        # NOTE(review): weights grow with distance (farther neighbors count
        # more); inverse-distance weighting may have been intended.
        weights = distances/np.nansum(distances)
        obs_interp[i] = np.nansum( obs_val[inds] * weights)
    return obs_interp
from ssj_auroral_boundary import dmsp_spectrogram
def jd2dayhour(jds):
    """Return the hour-of-day component (0-24) of Julian date(s).

    Julian days begin at noon, so shifting by half a day aligns the
    fractional part with the civil day before scaling to hours. Accepts a
    scalar or an array.
    """
    shifted = jds - 0.5
    return (shifted - np.floor(shifted)) * 24
| LBH_to_eflux/helper_funcs.py | 8,218 | generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance
generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance
generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance
generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance
Converts an array of latitude and lt points to polar for a top-down dialplot (latitude in degrees, LT in hours)
i.e. makes latitude the radial quantity and MLT the azimuthal
get the radial displacement (referenced to down from northern pole if we want to do a top down on the north,
or up from south pole if visa-versa)
Turns a matplotlib axes polar plot into a dial plot
Create an Apex conversion instance at the usual reference altitudeno epoch is specified; we will set the epoch just-in-time when we are going todo an coordinate transformation Apex reference height in metersreshape to N by 2 array where each row is (lat, lon)indicesconvert lt to theta (azimuthal angle) in radiansthe pi/2 rotates the coordinate system fromtheta=0 at negative y-axis (local time) totheta=0 at positive x axis (traditional polar coordinates)Rotate the plot so that noon is at the top and midnightis at the bottom, and fix the labels so radial directionis latitude and azimuthal direction is local time in hoursconvert latitude and longitude (in degrees) to cartesian coordinates for interpolation purposesindices of the map that fit the dmsp mapget mask for map elements that are within distance tolerance Returns n angular distances in radians between n-by-2 numpy arrayslocation1, location2 (calculated row-wise so diff between location1[0,] and location2[0,]assuming that these arrays have the columns lat[deg],localtime[hours] and that they are points on a sphere of constant radius(the points are at the same altitude)Bounds checkget the colatitude in radiansget the angular distance in longitude in radiansget the colatitude in radiansget the angular distance in longitude in radiansadd a dimension location2.shape = (1,)+location2.shape[:,1]reshape to N by 2 array where each row is (X, Y) weights = dists[i]/normreshape to N by 2 array where each row is (lat, lon)indicesassume jd is an array | 2,404 | en | 0.786136 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from cm_api.endpoints.types import *
__docformat__ = "epytext"

# REST path templates, filled with the cluster name (and, for the latter
# two, the host template name).
HOST_TEMPLATES_PATH = "/clusters/%s/hostTemplates"
HOST_TEMPLATE_PATH = "/clusters/%s/hostTemplates/%s"
APPLY_HOST_TEMPLATE_PATH = HOST_TEMPLATE_PATH + "/commands/applyHostTemplate"
def create_host_template(resource_root, name, cluster_name):
  """
  Create a new host template in the given cluster.

  @param resource_root: The root Resource object.
  @param name: Host template name
  @param cluster_name: Cluster name
  @return: An ApiHostTemplate object for the created host template.
  @since: API v3
  """
  template = ApiHostTemplate(resource_root, name, [])
  created = call(resource_root.post,
                 HOST_TEMPLATES_PATH % (cluster_name,),
                 ApiHostTemplate, True, data=[template], api_version=3)
  return created[0]
def get_host_template(resource_root, name, cluster_name):
  """
  Look up a single host template by name in a cluster.

  @param resource_root: The root Resource object.
  @param name: Host template name.
  @param cluster_name: Cluster name.
  @return: An ApiHostTemplate object.
  @since: API v3
  """
  path = HOST_TEMPLATE_PATH % (cluster_name, name)
  return call(resource_root.get, path, ApiHostTemplate, api_version=3)
def get_all_host_templates(resource_root, cluster_name="default"):
  """
  Fetch every host template defined in a cluster.

  @param cluster_name: Cluster name.
  @return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
  @since: API v3
  """
  path = HOST_TEMPLATES_PATH % (cluster_name,)
  return call(resource_root.get, path, ApiHostTemplate, True, api_version=3)
def delete_host_template(resource_root, name, cluster_name):
  """
  Remove the named host template from a cluster.

  @param resource_root: The root Resource object.
  @param name: Host template name.
  @param cluster_name: Cluster name.
  @return: The deleted ApiHostTemplate object.
  @since: API v3
  """
  path = HOST_TEMPLATE_PATH % (cluster_name, name)
  return call(resource_root.delete, path, ApiHostTemplate, api_version=3)
def update_host_template(resource_root, name, cluster_name, api_host_template):
  """
  Replace the named host template with the supplied definition.

  @param resource_root: The root Resource object.
  @param name: Host template name.
  @param cluster_name: Cluster name.
  @param api_host_template: The updated host template.
  @return: The updated ApiHostTemplate.
  @since: API v3
  """
  path = HOST_TEMPLATE_PATH % (cluster_name, name)
  return call(resource_root.put, path, ApiHostTemplate,
              data=api_host_template, api_version=3)
def apply_host_template(resource_root, name, cluster_name, host_ids, start_roles):
  """
  Apply the named host template to the given hosts, optionally starting
  the roles it creates.

  @param resource_root: The root Resource object.
  @param name: Host template name.
  @param cluster_name: Cluster name.
  @param host_ids: List of host ids.
  @param start_roles: Whether to start the created roles or not.
  @return: An ApiCommand object.
  @since: API v3
  """
  host_refs = [ApiHostRef(resource_root, host_id) for host_id in host_ids]
  return call(resource_root.post,
              APPLY_HOST_TEMPLATE_PATH % (cluster_name, name),
              ApiCommand, data=host_refs,
              params={"startRoles" : start_roles}, api_version=3)
class ApiHostTemplate(BaseApiResource):
  """A host template: a named set of role config group references that can
  be applied to cluster hosts to create roles in bulk (API v3+)."""

  _ATTRIBUTES = {
    'name' : None,
    'roleConfigGroupRefs' : Attr(ApiRoleConfigGroupRef),
    'clusterRef' : ROAttr(ApiClusterRef),  # read-only, set by the server
  }

  def __init__(self, resource_root, name=None, roleConfigGroupRefs=None):
    # BaseApiObject.init reads this constructor's locals() to populate the
    # attributes, so the parameter names must match _ATTRIBUTES keys and no
    # extra locals may be introduced here.
    BaseApiObject.init(self, resource_root, locals())

  def __str__(self):
    return "<ApiHostTemplate>: %s (cluster %s)" % (self.name, self.clusterRef.clusterName)

  def _api_version(self):
    # Host templates first appeared in API v3.
    return 3

  def _path(self):
    return HOST_TEMPLATE_PATH % (self.clusterRef.clusterName, self.name)

  def _do_update(self, update):
    # PUT the modified copy, then fold the server's response back into self.
    self._update(self._put('', ApiHostTemplate, data=update))
    return self

  def rename(self, new_name):
    """
    Rename a host template.

    @param new_name: New host template name.
    @return: An ApiHostTemplate object.
    """
    update = copy.copy(self)
    update.name = new_name
    return self._do_update(update)

  def set_role_config_groups(self, role_config_group_refs):
    """
    Updates the role config groups in a host template.

    @param role_config_group_refs: List of role config group refs.
    @return: An ApiHostTemplate object.
    """
    update = copy.copy(self)
    update.roleConfigGroupRefs = role_config_group_refs
    return self._do_update(update)

  def apply_host_template(self, host_ids, start_roles):
    """
    Apply this host template on the specified hosts and
    optionally start them.

    @param host_ids: List of host ids.
    @param start_roles: Whether to start the created roles or not.
    @return: An ApiCommand object.
    """
    return apply_host_template(self._get_resource_root(), self.name, self.clusterRef.clusterName, host_ids, start_roles)
| python/src/cm_api/endpoints/host_templates.py | 5,832 | Apply a host template identified by name on the specified hosts and
optionally start them.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
@since: API v3
Apply a host template identified by name on the specified hosts and
optionally start them.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
Delete a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: The deleted ApiHostTemplate object.
@since: API v3
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
Lookup a host template by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: An ApiHostTemplate object.
@since: API v3
Rename a host template.
@param new_name: New host template name.
@return: An ApiHostTemplate object.
Updates the role config groups in a host template.
@param role_config_group_refs: List of role config group refs.
@return: An ApiHostTemplate object.
Update a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param api_host_template: The updated host template.
@return: The updated ApiHostTemplate.
@since: API v3
Licensed to Cloudera, Inc. under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Cloudera, Inc. licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 2,707 | en | 0.653634 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional callback type accepted as `cls` by every operation: it receives
# the pipeline response, the deserialized body, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Wire the shared pipeline client, configuration and (de)serializers
    # into this operation group; all operations below use these four.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    network_interface_name: str,
    **kwargs
) -> None:
    # Issue the single raw DELETE request; long-running-operation polling
    # is layered on top by begin_delete.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all accepted outcomes of the DELETE.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    network_interface_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the specified network interface.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial DELETE. The lambda passes the
        # raw pipeline response through so the poller can inspect it.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Invoked by the poller once the LRO settles; DELETE has no body.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    network_interface_name: str,
    expand: Optional[str] = None,
    **kwargs
) -> "_models.NetworkInterface":
    """Gets information about the specified network interface.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: NetworkInterface, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if expand is not None:
        # $expand is only sent when the caller requested referenced resources.
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('NetworkInterface', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    network_interface_name: str,
    parameters: "_models.NetworkInterface",
    **kwargs
) -> "_models.NetworkInterface":
    # Single PUT request behind begin_create_or_update; raises on any
    # status other than 200 or 201.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'NetworkInterface')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both success codes carry a NetworkInterface body.
    if response.status_code == 200:
        deserialized = self._deserialize('NetworkInterface', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('NetworkInterface', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    network_interface_name: str,
    parameters: "_models.NetworkInterface",
    **kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
    """Creates or updates a network interface.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param parameters: Parameters supplied to the create or update network interface operation.
    :type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PUT. The lambda passes the raw
        # pipeline response through so the poller can drive the LRO protocol.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Final-state deserializer invoked by the poller once the LRO settles.
        deserialized = self._deserialize('NetworkInterface', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved operation instead of starting a new one.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    network_interface_name: str,
    parameters: "_models.TagsObject",
    **kwargs
) -> "_models.NetworkInterface":
    """Send the initial PATCH that updates tags on a network interface.

    Internal helper behind ``begin_update_tags``: performs a single,
    non-polled request and returns the deserialized NetworkInterface
    (or the result of ``cls(...)`` when a custom callback is supplied).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
    # Map common failure statuses to typed exceptions; callers may extend via kwargs.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the TagsObject payload into the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 carries the updated resource; anything else is an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('NetworkInterface', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    network_interface_name: str,
    parameters: "_models.TagsObject",
    **kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
    """Updates a network interface tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param parameters: Parameters supplied to update network interface tags.
    :type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Extract LRO control keywords before delegating to the initial request.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
    poll_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    resume_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if resume_token is None:
        # Fire the initial PATCH; the raw response seeds the poller.
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # Already consumed by the initial call; keep them away from the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def deserialize_output(pipeline_response):
        # Materialize the final NetworkInterface from the terminal response.
        result = self._deserialize('NetworkInterface', pipeline_response)
        if cls:
            return cls(pipeline_response, result, {})
        return result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Pick the polling strategy: ARM polling, no polling, or caller-supplied.
    if polling is True:
        polling_method = AsyncARMPolling(poll_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if resume_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=resume_token,
            client=self._client,
            deserialization_callback=deserialize_output
        )
    return AsyncLROPoller(self._client, raw_result, deserialize_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
def list_all(
    self,
    **kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
    """Gets all network interfaces in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
    # Typed exceptions for the common failure statuses; callers may extend.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {
            'Accept': self._serialize.header("accept", accept, 'str'),
        }  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the link already carries every query parameter.
            return self._client.get(next_link, {}, header_parameters)
        # First page: expand the operation's URL template.
        url = self._client.format_url(
            self.list_all.metadata['url'],  # type: ignore
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        )
        query_parameters = {
            'api-version': self._serialize.query("api_version", api_version, 'str'),
        }  # type: Dict[str, Any]
        return self._client.get(url, query_parameters, header_parameters)

    async def extract_data(pipeline_response):
        page = self._deserialize('NetworkInterfaceListResult', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, AsyncList(elements)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
    """Gets all network interfaces in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
    # Typed exceptions for common failure statuses; callers may extend via kwargs.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for either the first page or a continuation page.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation link already encodes the query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation link, items).
        deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; raise a typed error on any non-200 status.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'}  # type: ignore
async def _get_effective_route_table_initial(
    self,
    resource_group_name: str,
    network_interface_name: str,
    **kwargs
) -> Optional["_models.EffectiveRouteListResult"]:
    """Send the initial POST that starts computing the effective route table.

    Internal helper behind ``begin_get_effective_route_table``. Returns the
    deserialized result on 200, or ``None`` on 202 (operation accepted, no body yet).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
    # Typed exceptions for common failure statuses; callers may extend via kwargs.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self._get_effective_route_table_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # A 202 has no payload yet; only a 200 carries the route table.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}  # type: ignore
async def begin_get_effective_route_table(
    self,
    resource_group_name: str,
    network_interface_name: str,
    **kwargs
) -> AsyncLROPoller["_models.EffectiveRouteListResult"]:
    """Gets all route tables applied to a network interface.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Extract LRO control keywords before delegating to the initial POST.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EffectiveRouteListResult"]
    poll_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    resume_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if resume_token is None:
        # Start the operation; the raw response seeds the poller.
        raw_result = await self._get_effective_route_table_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # Consumed by the initial call; do not forward to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def deserialize_output(pipeline_response):
        # Build the EffectiveRouteListResult from the terminal response.
        result = self._deserialize('EffectiveRouteListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, result, {})
        return result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Resolve the polling strategy: ARM polling, no polling, or caller-supplied.
    if polling is True:
        polling_method = AsyncARMPolling(poll_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if resume_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=resume_token,
            client=self._client,
            deserialization_callback=deserialize_output
        )
    return AsyncLROPoller(self._client, raw_result, deserialize_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}  # type: ignore
async def _list_effective_network_security_groups_initial(
    self,
    resource_group_name: str,
    network_interface_name: str,
    **kwargs
) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]:
    """Send the initial POST that starts listing effective network security groups.

    Internal helper behind ``begin_list_effective_network_security_groups``.
    Returns the deserialized result on 200, or ``None`` on 202 (accepted, no body yet).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
    # Typed exceptions for common failure statuses; callers may extend via kwargs.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Construct URL
    url = self._list_effective_network_security_groups_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # A 202 has no payload yet; only a 200 carries the NSG list.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}  # type: ignore
async def begin_list_effective_network_security_groups(
    self,
    resource_group_name: str,
    network_interface_name: str,
    **kwargs
) -> AsyncLROPoller["_models.EffectiveNetworkSecurityGroupListResult"]:
    """Gets all network security groups applied to a network interface.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveNetworkSecurityGroupListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO control keywords before forwarding kwargs to the initial call.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Start the operation; keep the raw pipeline response for the poller.
        raw_result = await self._list_effective_network_security_groups_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial call; must not leak into the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response into the result model.
        deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Resolve the polling strategy: ARM polling, no polling, or caller-supplied.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume an existing operation from its saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}  # type: ignore
def list_virtual_machine_scale_set_vm_network_interfaces(
    self,
    resource_group_name: str,
    virtual_machine_scale_set_name: str,
    virtualmachine_index: str,
    **kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
    """Gets information about all network interfaces in a virtual machine in a virtual machine scale
    set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
    :type virtual_machine_scale_set_name: str
    :param virtualmachine_index: The virtual machine index.
    :type virtualmachine_index: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
    # Typed exceptions for the common failure statuses; callers may extend.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # NOTE: scale-set NIC operations use the older compute-aligned API version.
    api_version = "2017-03-30"
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {
            'Accept': self._serialize.header("accept", accept, 'str'),
        }  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the link already carries every query parameter.
            return self._client.get(next_link, {}, header_parameters)
        # First page: expand the operation's URL template.
        url = self._client.format_url(
            self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'],  # type: ignore
            resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
            virtualMachineScaleSetName=self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
            virtualmachineIndex=self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        )
        query_parameters = {
            'api-version': self._serialize.query("api_version", api_version, 'str'),
        }  # type: Dict[str, Any]
        return self._client.get(url, query_parameters, header_parameters)

    async def extract_data(pipeline_response):
        page = self._deserialize('NetworkInterfaceListResult', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, AsyncList(elements)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'}  # type: ignore
def list_virtual_machine_scale_set_network_interfaces(
    self,
    resource_group_name: str,
    virtual_machine_scale_set_name: str,
    **kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
    """Gets all network interfaces in a virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
    :type virtual_machine_scale_set_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceListResult"]
    # Typed exceptions for common failure statuses; callers may extend via kwargs.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # NOTE: scale-set NIC operations use the older compute-aligned API version.
    api_version = "2017-03-30"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for either the first page or a continuation page.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Continuation link already encodes the query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation link, items).
        deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; raise a typed error on any non-200 status.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'}  # type: ignore
async def get_virtual_machine_scale_set_network_interface(
    self,
    resource_group_name: str,
    virtual_machine_scale_set_name: str,
    virtualmachine_index: str,
    network_interface_name: str,
    expand: Optional[str] = None,
    **kwargs
) -> "_models.NetworkInterface":
    """Get the specified network interface in a virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
    :type virtual_machine_scale_set_name: str
    :param virtualmachine_index: The virtual machine index.
    :type virtualmachine_index: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: NetworkInterface, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterface"]
    # Typed exceptions for the common failure statuses; callers may extend.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # NOTE: scale-set NIC operations use the older compute-aligned API version.
    api_version = "2017-03-30"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.get_virtual_machine_scale_set_network_interface.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        virtualMachineScaleSetName=self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
        virtualmachineIndex=self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
        networkInterfaceName=self._serialize.url("network_interface_name", network_interface_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    # Query string: api-version always; $expand only when requested.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 carries the resource; anything else becomes a typed error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    result = self._deserialize('NetworkInterface', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'}  # type: ignore
    def list_virtual_machine_scale_set_ip_configurations(
        self,
        resource_group_name: str,
        virtual_machine_scale_set_name: str,
        virtualmachine_index: str,
        network_interface_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
        """Get the specified network interface ip configuration in a virtual machine scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # callers may extend/override the default HTTP status -> exception mapping
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-03-30"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build one page's GET request. The first page is formatted from the
            # metadata URL template; subsequent pages reuse the service-supplied
            # next_link verbatim (it already carries its own query string, so no
            # query parameters are added for that case).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                    'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, page items);
            # a falsy next_link is normalized to None to stop the pager.
            deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, mapping error status codes to typed exceptions.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'}  # type: ignore
    async def get_virtual_machine_scale_set_ip_configuration(
        self,
        resource_group_name: str,
        virtual_machine_scale_set_name: str,
        virtualmachine_index: str,
        network_interface_name: str,
        ip_configuration_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> "_models.NetworkInterfaceIPConfiguration":
        """Get the specified network interface ip configuration in a virtual machine scale set.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param ip_configuration_name: The name of the ip configuration.
        :type ip_configuration_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkInterfaceIPConfiguration, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # callers may extend/override the default HTTP status -> exception mapping
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-03-30"
        accept = "application/json"

        # Construct URL
        url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
            'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Issue the GET through the async pipeline; only 200 is a success status.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'}  # type: ignore
| sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py | 63,787 | NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- type: ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType[None] type: Optional[str] type: ignore type: ClsType["_models.NetworkInterface"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: ClsType["_models.NetworkInterface"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.NetworkInterface"] type: Optional[str] type: ignore type: ClsType["_models.NetworkInterface"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.NetworkInterface"] type: Optional[str] type: ignore type: ClsType["_models.NetworkInterfaceListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.NetworkInterfaceListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType[Optional["_models.EffectiveRouteListResult"]] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] 
type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.EffectiveRouteListResult"] type: Optional[str] type: ignore type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"] type: Optional[str] type: ignore type: ClsType["_models.NetworkInterfaceListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.NetworkInterfaceListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.NetworkInterface"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"] Construct headers type: Dict[str, Any] Construct URL type: ignore Construct parameters type: Dict[str, Any] type: Dict[str, Any] type: ignore type: ClsType["_models.NetworkInterfaceIPConfiguration"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore | 7,151 | en | 0.420845 |
# -*- coding: utf-8 -*-
# @Time : 2021/5/8 7:14
# @Author : 咸鱼型233
# @File : v1.1_py3_adjust.py
# @Software: PyCharm
# @Function: v1.0的py3适应性调整
# 修改记录 : 2021.5.8-20:34-改崩了,下一版用urllib3实现
import base64
import json
import urllib
import requests
import urllib3
from config import APPCODE, path_image
# 获取图片二进制数据的base64编码(API请求参数需要)
def get_img_base64(img_file):
    """Read the image at ``img_file`` and return its raw bytes base64-encoded.

    The return value is ``bytes`` (the output of :func:`base64.b64encode`),
    which is what the OCR API request builder expects.
    """
    with open(img_file, 'rb') as image_fh:
        return base64.b64encode(image_fh.read())
# ---
def predict(url, appcode, img_base64, kv_config, old_format):
    """Call the Alicloud table-OCR endpoint and return ``(status, headers, body)``.

    Args:
        url: full endpoint URL of the OCR API.
        appcode: Alicloud APPCODE used for the ``Authorization`` header.
        img_base64: base64-encoded image data (``bytes`` or ``str``).
        kv_config: optional dict serialized into the ``configure`` field;
            pass ``None`` to omit it.
        old_format: when True, wrap the payload in the legacy ``inputs`` format.

    Returns:
        Tuple of (HTTP status code or None on transport failure,
        response headers mapping, raw response body as bytes).
    """
    # py3 fix: base64.b64encode returns bytes, and str(bytes) would embed a
    # literal "b'...'" into the JSON body. Decode to plain ASCII text first.
    if isinstance(img_base64, bytes):
        img_base64 = img_base64.decode('ascii')

    # 构造请求参数(Body)
    if not old_format:
        param = {'image': img_base64}
        if kv_config is not None:
            param['configure'] = json.dumps(kv_config)
        body = json.dumps(param)
    else:
        # Legacy wire format: each value is wrapped in a dataType/dataValue pair
        # and the whole payload goes under "inputs".
        pic = {'dataType': 50, 'dataValue': img_base64}
        param = {'image': pic}
        if kv_config is not None:
            param['configure'] = {'dataType': 50, 'dataValue': json.dumps(kv_config)}
        body = json.dumps({"inputs": [param]})

    # 根据阿里云表格文字识别API的APPCODE构造Headers
    headers = {'Authorization': 'APPCODE %s' % appcode}
    try:
        # The API consumes a JSON body, so this must be a POST (the previous
        # requests.get call with a body was a bug); response objects expose
        # status_code/content, not the urllib2-era code()/read().
        response = requests.post(url=url, headers=headers, data=body, timeout=10)
        return response.status_code, response.headers, response.content
    except requests.RequestException as e:
        # Transport-level failure: there is no HTTP status/headers/body to
        # return, so keep the triple shape with placeholder values.
        return None, {}, str(e).encode('utf-8')
def demo():
    """Run one table-OCR request against the Alicloud endpoint and print the result."""
    appcode = APPCODE
    url = 'https://form.market.alicloudapi.com/api/predict/ocr_table_parse'
    img_file = path_image
    # Set to True when the request body must use the legacy "inputs" wrapper.
    is_old_format = False
    # Pass None instead of this dict when no "configure" field is wanted.
    config = {'format': 'html', 'finance': False, 'dir_assure': False}

    img_base64data = get_img_base64(img_file)
    stat, header, content = predict(url, appcode, img_base64data, config, is_old_format)

    if stat != 200:
        print('Http status code: ', stat)
        print('Error msg in header: ', header.get('x-ca-error-message', ''))
        print('Error msg in body: ', content)
        exit()

    result_str = (
        json.loads(content)['outputs'][0]['outputValue']['dataValue']
        if is_old_format
        else content
    )
    print(result_str)
if __name__ == '__main__':
    # Run the OCR demo only when this file is executed as a script.
    demo()
| DailyLife/picOCR_toExcel/old_version/v1.1_py3_adjust.py | 2,654 | -*- coding: utf-8 -*- @Time : 2021/5/8 7:14 @Author : 咸鱼型233 @File : v1.1_py3_adjust.py @Software: PyCharm @Function: v1.0的py3适应性调整 修改记录 : 2021.5.8-20:34-改崩了,下一版用urllib3实现 获取图片二进制数据的base64编码(API请求参数需要) --- 构造请求参数(Body) param = json.dumps(param) 根据阿里云表格文字识别API的APPCODE构造Headers request = requests.post(url=url, headers=headers, data=body) response = requests.get(request, timeout=10) 如果输入带有inputs, 设置为True,否则设为False 如果没有configure字段,config设为None config = None result = json.loads(result_str) | 496 | zh | 0.275548 |
import cupy as np
def supersample(clip, d, n_frames):
    """Replaces each frame at time t by the mean of `n_frames` equally spaced frames
    taken in the interval [t-d, t+d]. This results in motion blur.

    Parameters
    ----------
    clip : the clip to transform (must expose a ``transform`` method).
    d : half-width of the time window around t to sample from.
    n_frames : number of frames averaged per output frame.
    """

    # Named filter function instead of shadowing the builtin `filter`.
    def motion_blur(get_frame, t):
        timings = np.linspace(t - d, t + d, n_frames)
        # Promote uint8 frames to uint16 then float before averaging so the
        # per-pixel sums cannot overflow; round back down to uint8 at the end.
        frame_average = np.mean(
            1.0 * np.array([get_frame(t_) for t_ in timings], dtype="uint16"), axis=0
        )
        return frame_average.astype("uint8")

    return clip.transform(motion_blur)
| moviepy/video/fx/supersample.py | 510 | Replaces each frame at time t by the mean of `n_frames` equally spaced frames
taken in the interval [t-d, t+d]. This results in motion blur. | 140 | en | 0.816026 |
"""scrapli_cfg.platform.core.cisco_iosxe.sync_platform"""
from typing import Any, Callable, List, Optional
from scrapli.driver import NetworkDriver
from scrapli.response import MultiResponse, Response
from scrapli_cfg.diff import ScrapliCfgDiffResponse
from scrapli_cfg.exceptions import DiffConfigError, FailedToDetermineDeviceState
from scrapli_cfg.platform.base.sync_platform import ScrapliCfgPlatform
from scrapli_cfg.platform.core.cisco_iosxe.base_platform import (
CONFIG_SOURCES,
FilePromptMode,
ScrapliCfgIOSXEBase,
)
from scrapli_cfg.response import ScrapliCfgResponse
class ScrapliCfgIOSXE(ScrapliCfgPlatform, ScrapliCfgIOSXEBase):
    # Synchronous scrapli-cfg platform for Cisco IOS-XE: candidate configs are
    # staged as files on the device filesystem (written via tclsh) and then
    # merged or replaced into the running config.
    def __init__(
        self,
        conn: NetworkDriver,
        *,
        config_sources: Optional[List[str]] = None,
        on_prepare: Optional[Callable[..., Any]] = None,
        filesystem: str = "flash:",
        cleanup_post_commit: bool = True,
        dedicated_connection: bool = False,
        ignore_version: bool = False,
    ) -> None:
        """
        Scrapli cfg IOSXE platform constructor

        Args:
            conn: scrapli network driver connection to use for all operations
            config_sources: allowed config sources; defaults to module CONFIG_SOURCES
            on_prepare: optional callable passed through to the base platform
            filesystem: device filesystem prefix where candidate configs are staged
            cleanup_post_commit: delete the candidate config file after commit
            dedicated_connection: passed through to the base platform
            ignore_version: passed through to the base platform

        Returns:
            None

        Raises:
            N/A

        """
        if config_sources is None:
            config_sources = CONFIG_SOURCES
        super().__init__(
            conn=conn,
            config_sources=config_sources,
            on_prepare=on_prepare,
            dedicated_connection=dedicated_connection,
            ignore_version=ignore_version,
        )
        self.filesystem = filesystem
        # percentage safety buffer used when checking that a candidate config
        # fits on the filesystem
        self._filesystem_space_available_buffer_perc = 10
        self._replace = False
        self.candidate_config_filename = ""
        self.cleanup_post_commit = cleanup_post_commit
    def _get_filesystem_space_available(self) -> int:
        """
        Determine the number of bytes available on the configured filesystem

        Args:
            N/A

        Returns:
            int: bytes available on the filesystem

        Raises:
            FailedToDetermineDeviceState: if unable to fetch file filesystem bytes available

        """
        filesystem_size_result = self.conn.send_command(command=f"dir {self.filesystem} | i bytes")
        if filesystem_size_result.failed:
            raise FailedToDetermineDeviceState("failed to determine space available on filesystem")
        return self._post_get_filesystem_space_available(output=filesystem_size_result.result)
    def _determine_file_prompt_mode(self) -> FilePromptMode:
        """
        Determine the device file prompt mode

        Args:
            N/A

        Returns:
            FilePromptMode: enum representing file prompt mode

        Raises:
            FailedToDetermineDeviceState: if unable to fetch file prompt mode

        """
        file_prompt_mode_result = self.conn.send_command(command="show run | i file prompt")
        if file_prompt_mode_result.failed:
            raise FailedToDetermineDeviceState("failed to determine file prompt mode")
        return self._post_determine_file_prompt_mode(output=file_prompt_mode_result.result)
    def _delete_candidate_config(self) -> Response:
        """
        Delete candidate config from the filesystem

        Args:
            N/A

        Returns:
            Response: response from deleting the candidate config

        Raises:
            N/A

        """
        # have to check again because the candidate config may have changed this!
        file_prompt_mode = self._determine_file_prompt_mode()
        # noisy/alert prompt modes require answering extra confirmation prompts
        if file_prompt_mode in (FilePromptMode.ALERT, FilePromptMode.NOISY):
            delete_events = [
                (
                    f"delete {self.filesystem}{self.candidate_config_filename}",
                    "Delete filename",
                ),
                (
                    "",
                    "[confirm]",
                ),
                ("", ""),
            ]
        else:
            delete_events = [
                (f"delete {self.filesystem}{self.candidate_config_filename}", "[confirm]"),
                ("", ""),
            ]
        delete_result = self.conn.send_interactive(interact_events=delete_events)
        return delete_result
    def get_version(self) -> ScrapliCfgResponse:
        """
        Get the device version string

        Args:
            N/A

        Returns:
            ScrapliCfgResponse: response object with the parsed version string result

        Raises:
            N/A

        """
        response = self._pre_get_version()
        version_result = self.conn.send_command(command="show version | i Version")
        return self._post_get_version(
            response=response,
            scrapli_responses=[version_result],
            result=self._parse_version(device_output=version_result.result),
        )
    def get_config(self, source: str = "running") -> ScrapliCfgResponse:
        """
        Get the configuration from the requested source

        Args:
            source: config source to fetch, i.e. "running"|"startup"

        Returns:
            ScrapliCfgResponse: response object with the config as its result

        Raises:
            N/A

        """
        response = self._pre_get_config(source=source)
        config_result = self.conn.send_command(command=self._get_config_command(source=source))
        return self._post_get_config(
            response=response,
            source=source,
            scrapli_responses=[config_result],
            result=config_result.result,
        )
    def load_config(self, config: str, replace: bool = False, **kwargs: Any) -> ScrapliCfgResponse:
        """
        Load configuration to a device

        Supported kwargs:
            auto_clean: automatically "clean" any data that would be in a configuration from a
                "get_config" operation that would prevent loading a config -- for example, things
                like the "Building Configuration" lines in IOSXE output, etc.. Defaults to `True`

        Args:
            config: string of the configuration to load
            replace: replace the configuration or not, if false configuration will be loaded as a
                merge operation
            kwargs: additional kwargs that the implementing classes may need for their platform,
                see above for iosxe supported kwargs

        Returns:
            ScrapliCfgResponse: response object

        Raises:
            N/A

        """
        if kwargs.get("auto_clean", True) is True:
            config = self.clean_config(config=config)
        response = self._pre_load_config(config=config)
        config = self._prepare_load_config(config=config, replace=replace)
        # make sure the candidate config (plus safety buffer) fits on the device
        filesystem_bytes_available = self._get_filesystem_space_available()
        self._space_available(filesystem_bytes_available=filesystem_bytes_available)
        # when in tcl command mode or whatever it is, tcl wants \r for return char, so stash the
        # original return char and sub in \r for a bit
        original_return_char = self.conn.comms_return_char
        tcl_comms_return_char = "\r"
        # pop into tclsh before swapping the return char just to be safe -- \r or \n should both be
        # fine for up to here but who knows... :)
        self.conn.acquire_priv(desired_priv="tclsh")
        self.conn.comms_return_char = tcl_comms_return_char
        config_result = self.conn.send_config(config=config, privilege_level="tclsh")
        # reset the return char to the "normal" one and drop into whatever is the "default" priv
        self.conn.acquire_priv(desired_priv=self.conn.default_desired_privilege_level)
        self.conn.comms_return_char = original_return_char
        return self._post_load_config(
            response=response,
            scrapli_responses=[config_result],
        )
    def abort_config(self) -> ScrapliCfgResponse:
        """
        Abort the loaded candidate config -- deletes the staged file and resets session state

        Args:
            N/A

        Returns:
            ScrapliCfgResponse: response object

        Raises:
            N/A

        """
        response = self._pre_abort_config(
            session_or_config_file=bool(self.candidate_config_filename)
        )
        abort_result = self._delete_candidate_config()
        self._reset_config_session()
        return self._post_abort_config(response=response, scrapli_responses=[abort_result])
    def save_config(self) -> Response:
        """
        Save the config -- "copy run start"!

        Args:
            N/A

        Returns:
            Response: scrapli response object

        Raises:
            N/A

        """
        # we always re-check file prompt mode because it could have changed!
        file_prompt_mode = self._determine_file_prompt_mode()
        if file_prompt_mode == FilePromptMode.ALERT:
            save_events = [
                (
                    "copy running-config startup-config",
                    "Destination filename",
                ),
                ("", ""),
            ]
        elif file_prompt_mode == FilePromptMode.NOISY:
            save_events = [
                (
                    "copy running-config startup-config",
                    "Source filename",
                ),
                (
                    "",
                    "Destination filename",
                ),
                ("", ""),
            ]
        else:
            save_events = [("copy running-config startup-config", "")]
        save_result = self.conn.send_interactive(interact_events=save_events)
        return save_result
    def _commit_config_merge(self, file_prompt_mode: Optional[FilePromptMode] = None) -> Response:
        """
        Commit the configuration in merge mode

        Args:
            file_prompt_mode: optionally provide the file prompt mode, if its None we will fetch it
                to decide if we need to use interactive mode or not

        Returns:
            Response: scrapli response object

        Raises:
            N/A

        """
        if file_prompt_mode is None:
            file_prompt_mode = self._determine_file_prompt_mode()
        if file_prompt_mode == FilePromptMode.ALERT:
            merge_events = [
                (
                    f"copy {self.filesystem}{self.candidate_config_filename} running-config",
                    "Destination filename",
                ),
                ("", ""),
            ]
        elif file_prompt_mode == FilePromptMode.NOISY:
            merge_events = [
                (
                    f"copy {self.filesystem}{self.candidate_config_filename} running-config",
                    "Source filename",
                ),
                (
                    "",
                    "Destination filename",
                ),
                ("", ""),
            ]
        else:
            merge_events = [
                (f"copy {self.filesystem}{self.candidate_config_filename} running-config", "")
            ]
        commit_result = self.conn.send_interactive(interact_events=merge_events)
        return commit_result
    def commit_config(self, source: str = "running") -> ScrapliCfgResponse:
        """
        Commit the loaded candidate configuration (replace or merge), then save it

        Args:
            source: config source to commit against, i.e. "running"

        Returns:
            ScrapliCfgResponse: response object

        Raises:
            N/A

        """
        scrapli_responses = []
        response = self._pre_commit_config(
            source=source, session_or_config_file=bool(self.candidate_config_filename)
        )
        file_prompt_mode = self._determine_file_prompt_mode()
        if self._replace is True:
            # replace mode uses "configure replace ... force" against the staged file
            replace_command = (
                f"configure replace {self.filesystem}{self.candidate_config_filename} force"
            )
            commit_result = self.conn.send_command(command=replace_command)
        else:
            commit_result = self._commit_config_merge(file_prompt_mode=file_prompt_mode)
        scrapli_responses.append(commit_result)
        save_config_result = self.save_config()
        scrapli_responses.append(save_config_result)
        if self.cleanup_post_commit:
            # optionally remove the staged candidate config file from the device
            cleanup_result = self._delete_candidate_config()
            scrapli_responses.append(cleanup_result)
        self._reset_config_session()
        return self._post_load_config(
            response=response,
            scrapli_responses=scrapli_responses,
        )
    def diff_config(self, source: str = "running") -> ScrapliCfgDiffResponse:
        """
        Diff the candidate configuration against a source config

        Args:
            source: config source to diff against, i.e. "running"

        Returns:
            ScrapliCfgDiffResponse: diff object containing source/candidate/device diffs

        Raises:
            N/A

        """
        scrapli_responses = []
        device_diff = ""
        source_config = ""
        diff_response = self._pre_diff_config(
            source=source, session_or_config_file=bool(self.candidate_config_filename)
        )
        try:
            diff_result = self.conn.send_command(command=self._get_diff_command(source=source))
            scrapli_responses.append(diff_result)
            if diff_result.failed:
                msg = "failed generating diff for config session"
                self.logger.critical(msg)
                raise DiffConfigError(msg)
            device_diff = diff_result.result
            source_config_result = self.get_config(source=source)
            source_config = source_config_result.result
            if isinstance(source_config_result.scrapli_responses, MultiResponse):
                # in this case this will always be a multiresponse or nothing (failure) but mypy
                # doesnt know that, hence the isinstance check
                scrapli_responses.extend(source_config_result.scrapli_responses)
            if source_config_result.failed:
                msg = "failed fetching source config for diff comparison"
                self.logger.critical(msg)
                raise DiffConfigError(msg)
        except DiffConfigError:
            # failures above still produce a (partial) diff response rather than raising
            pass
        source_config, candidate_config = self._normalize_source_candidate_configs(
            source_config=source_config
        )
        return self._post_diff_config(
            diff_response=diff_response,
            scrapli_responses=scrapli_responses,
            source_config=source_config,
            candidate_config=candidate_config,
            device_diff=device_diff,
        )
| scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py | 13,089 | Commit the configuration in merge mode
Args:
file_prompt_mode: optionally provide the file prompt mode, if its None we will fetch it
to decide if we need to use interactive mode or not
Returns:
Response: scrapli response object
Raises:
N/A
Delete candidate config from the filesystem
Args:
N/A
Returns:
Response: response from deleting the candidate config
Raises:
N/A
Determine the device file prompt mode
Args:
N/A
Returns:
FilePromptMode: enum representing file prompt mode
Raises:
FailedToDetermineDeviceState: if unable to fetch file prompt mode
Abort a configuration -- discards any loaded config
Args:
N/A
Returns:
None
Raises:
FailedToDetermineDeviceState: if unable to fetch file filesystem bytes available
Load configuration to a device
Supported kwargs:
auto_clean: automatically "clean" any data that would be in a configuration from a
"get_config" operation that would prevent loading a config -- for example, things
like the "Building Configuration" lines in IOSXE output, etc.. Defaults to `True`
Args:
config: string of the configuration to load
replace: replace the configuration or not, if false configuration will be loaded as a
merge operation
kwargs: additional kwargs that the implementing classes may need for their platform,
see above for iosxe supported kwargs
Returns:
ScrapliCfgResponse: response object
Raises:
N/A
Save the config -- "copy run start"!
Args:
N/A
Returns:
Response: scrapli response object
Raises:
N/A
scrapli_cfg.platform.core.cisco_iosxe.sync_platform
have to check again because the candidate config may have changed this! when in tcl command mode or whatever it is, tcl wants \r for return char, so stash the original return char and sub in \r for a bit pop into tclsh before swapping the return char just to be safe -- \r or \n should both be fine for up to here but who knows... :) reset the return char to the "normal" one and drop into whatever is the "default" priv we always re-check file prompt mode because it could have changed! in this case this will always be a multiresponse or nothing (failure) but mypy doesnt know that, hence the isinstance check | 2,256 | en | 0.779101 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDatacenterConnector
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vm-migration
# [START vmmigration_v1_generated_VmMigration_GetDatacenterConnector_sync]
from google.cloud import vmmigration_v1
def sample_get_datacenter_connector():
    """Fetch a single DatacenterConnector by resource name and print it."""
    # Build the client, then the request for the connector we want.
    client = vmmigration_v1.VmMigrationClient()
    request = vmmigration_v1.GetDatacenterConnectorRequest(name="name_value")

    # Issue the RPC and show the server's response.
    response = client.get_datacenter_connector(request=request)
    print(response)
# [END vmmigration_v1_generated_VmMigration_GetDatacenterConnector_sync]
| samples/generated_samples/vmmigration_v1_generated_vm_migration_get_datacenter_connector_sync.py | 1,515 | -*- coding: utf-8 -*- Copyright 2022 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Generated code. DO NOT EDIT! Snippet for GetDatacenterConnector NOTE: This snippet has been automatically generated for illustrative purposes only. It may require modifications to work in your environment. To install the latest published package dependency, execute the following: python3 -m pip install google-cloud-vm-migration [START vmmigration_v1_generated_VmMigration_GetDatacenterConnector_sync] Create a client Initialize request argument(s) Make the request Handle the response [END vmmigration_v1_generated_VmMigration_GetDatacenterConnector_sync] | 1,129 | en | 0.813116 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
# Sphinx extensions required for the Open Telekom Cloud documentation theme.
extensions = [
    'otcdocstheme'
]
# Use the OTC docs HTML theme with its default options.
html_theme = 'otcdocs'
html_theme_options = {
}
# Disable automatic project name/version detection by the OTC theme;
# the values are set explicitly below.
otcdocs_auto_name = False
otcdocs_auto_version = False
project = 'Dummy Service' # FIXME
otcdocs_repo_name = 'opentelekomcloud-docs/template' # FIXME
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2022-present, Open Telekom Cloud'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Dummy UMN" # FIXME
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'dummydoc' # FIXME
# -- Options for LaTeX output: one (sourcefile, target, title, author, class)
# tuple per generated document.
latex_documents = [
    ('index',
     'umn-dummy.tex', # FIXME
     u'%s User Manual Documentation' % project,
     u'OpenTelekomCloud', 'manual'),
]
| umn/source/conf.py | 5,166 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. FIXME FIXME If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- General configuration ---------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. The suffix of source filenames. The encoding of source files. source_encoding = 'utf-8' The master toctree document. General information about the project. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. language = None There are two options for replacing |today|: either, you set today to some non-false value, then it is used: today = '' Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' The reST default role (used for this markup: `text`) to use for all documents. default_role = None If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default. The name of the Pygments (syntax highlighting) style to use. -- Options for man page output ---------------------------------------------- Grouping the document tree for man pages. 
List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' -- Options for HTML output -------------------------------------------------- The theme to use for HTML and HTML Help pages. Major themes that come with Sphinx are currently 'default' and 'sphinxdoc'. html_theme_path = ["."] html_theme = '_theme' Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom themes here, relative to this directory. html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation". FIXME A shorter title for the navigation bar. Default is the same as html_title. html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar. html_logo = None The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large. html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities. html_use_smartypants = True Custom sidebar templates, maps document names to template names. html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names. html_additional_pages = {} If false, no module index is generated. html_use_modindex = True If false, no index is generated. html_use_index = True If true, the index is split into individual pages for each letter. html_split_index = False If true, links to the reST sources are added to the pages. 
html_show_sourcelink = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served. html_use_opensearch = '' If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). html_file_suffix = '' Output file base name for HTML help builder. FIXME FIXME | 4,222 | en | 0.758736 |
import os
import json
import zipfile
import tempfile
from pathlib import Path
from copy import deepcopy
from .exception import FrictionlessException
from .metadata import Metadata
from .detector import Detector
from .resource import Resource
from .system import system
from . import helpers
from . import errors
from . import config
class Package(Metadata):
    """Package representation

    API | Usage
    -------- | --------
    Public | `from frictionless import Package`

    This class is one of the cornerstones of the Frictionless framework.
    It manages the underlying resources and provides an ability to describe a package.

    ```python
    package = Package(resources=[Resource(path="data/table.csv")])
    package.get_resource('table').read_rows() == [
        {'id': 1, 'name': 'english'},
        {'id': 2, 'name': '中国人'},
    ]
    ```

    Parameters:
        source (any): Source of the package; can be in various forms.
            Usually, it's a package descriptor in a form of dict or path
            Also, it can be a glob pattern or a resource path
        descriptor (dict|str): A resource descriptor provided explicitly.
            Keyword arguments will patch this descriptor if provided.
        resources? (dict|Resource[]): A list of resource descriptors.
            It can be dicts or Resource instances.
        id? (str): A property reserved for globally unique identifiers.
            Examples of identifiers that are unique include UUIDs and DOIs.
        name? (str): A short url-usable (and preferably human-readable) name.
            This MUST be lower-case and contain only alphanumeric characters
            along with “.”, “_” or “-” characters.
        title? (str): A Package title according to the specs
            It should be a human-oriented title of the resource.
        description? (str): A Package description according to the specs
            It should be a human-oriented description of the resource.
        licenses? (dict[]): The license(s) under which the package is provided.
            If omitted it's considered the same as the package's licenses.
        sources? (dict[]): The raw sources for this data package.
            It MUST be an array of Source objects.
            Each Source object MUST have a title and
            MAY have path and/or email properties.
        profile? (str): A string identifying the profile of this descriptor.
            For example, `fiscal-data-package`.
        homepage? (str): A URL for the home on the web that is related to this package.
            For example, github repository or ckan dataset address.
        version? (str): A version string identifying the version of the package.
            It should conform to the Semantic Versioning requirements and
            should follow the Data Package Version pattern.
        contributors? (dict[]): The people or organizations who contributed to this package.
            It MUST be an array. Each entry is a Contributor and MUST be an object.
            A Contributor MUST have a title property and MAY contain
            path, email, role and organization properties.
        keywords? (str[]): An Array of string keywords to assist users searching.
            For example, ['data', 'fiscal']
        image? (str): An image to use for this data package.
            For example, when showing the package in a listing.
        created? (str): The datetime on which this was created.
            The datetime must conform to the string formats for RFC3339 datetime,
        basepath? (str): A basepath of the resource
            The fullpath of the resource is joined `basepath` and /path`
        detector? (Detector): File/table detector.
            For more information, please check the Detector documentation.
        onerror? (ignore|warn|raise): Behaviour if there is an error.
            It defaults to 'ignore'. The default mode will ignore all errors
            on resource level and they should be handled by the user
            being available in Header and Row objects.
        trusted? (bool): Don't raise an exception on unsafe paths.
            A path provided as a part of the descriptor considered unsafe
            if there are path traversing or the path is absolute.
            A path provided as `source` or `path` is always trusted.
        hashing? (str): a hashing algorithm for resources
            It defaults to 'md5'.

    Raises:
        FrictionlessException: raise any error that occurs during the process
    """

    def __init__(
        self,
        source=None,
        *,
        descriptor=None,
        # Spec
        resources=None,
        id=None,
        name=None,
        title=None,
        description=None,
        licenses=None,
        sources=None,
        profile=None,
        homepage=None,
        version=None,
        contributors=None,
        keywords=None,
        image=None,
        created=None,
        # Extra
        basepath="",
        detector=None,
        onerror="ignore",
        trusted=False,
        hashing=None,
    ):

        # Handle source: when no explicit descriptor is given the source itself
        # acts as the descriptor; multipart/table files get a synthesized one.
        if source is not None:
            if descriptor is None:
                descriptor = source
                file = system.create_file(source, basepath=basepath)
                if file.multipart:
                    descriptor = {"resources": []}
                    for part in file.normpath:
                        descriptor["resources"].append({"path": part})
                elif file.type == "table" and not file.compression:
                    descriptor = {"resources": [{"path": file.normpath}]}

        # Handle pathlib
        if isinstance(descriptor, Path):
            descriptor = str(descriptor)

        # Handle trusted: a package built purely from keyword arguments
        # (no external descriptor) is considered safe by construction
        if descriptor is None:
            trusted = True

        # Handle zip
        if helpers.is_zip_descriptor(descriptor):
            descriptor = helpers.unzip_descriptor(descriptor, "datapackage.json")

        # Set attributes
        self.setinitial("resources", resources)
        self.setinitial("name", name)
        self.setinitial("id", id)
        self.setinitial("licenses", licenses)
        self.setinitial("profile", profile)
        self.setinitial("title", title)
        self.setinitial("description", description)
        self.setinitial("homepage", homepage)
        self.setinitial("version", version)
        self.setinitial("sources", sources)
        self.setinitial("contributors", contributors)
        self.setinitial("keywords", keywords)
        self.setinitial("image", image)
        self.setinitial("created", created)
        self.__basepath = basepath or helpers.parse_basepath(descriptor)
        self.__detector = detector or Detector()
        self.__onerror = onerror
        self.__trusted = trusted
        self.__hashing = hashing
        super().__init__(descriptor)

    def __setattr__(self, name, value):
        # Intercept writes to the "extra" attributes so that dependent state
        # (e.g. resources) is re-synchronized via metadata_process afterwards
        if name == "hashing":
            self.__hashing = value
        elif name == "basepath":
            self.__basepath = value
        elif name == "onerror":
            self.__onerror = value
        elif name == "trusted":
            self.__trusted = value
        else:
            return super().__setattr__(name, value)
        self.metadata_process()

    @Metadata.property
    def name(self):
        """
        Returns:
            str?: package name
        """
        return self.get("name")

    @Metadata.property
    def id(self):
        """
        Returns:
            str?: package id
        """
        return self.get("id")

    @Metadata.property
    def licenses(self):
        """
        Returns:
            dict?: package licenses
        """
        return self.get("licenses")

    @Metadata.property
    def profile(self):
        """
        Returns:
            str: package profile
        """
        return self.get("profile", config.DEFAULT_PACKAGE_PROFILE)

    @Metadata.property
    def title(self):
        """
        Returns:
            str?: package title
        """
        return self.get("title")

    @Metadata.property
    def description(self):
        """
        Returns:
            str?: package description
        """
        return self.get("description")

    @Metadata.property
    def homepage(self):
        """
        Returns:
            str?: package homepage
        """
        return self.get("homepage")

    @Metadata.property
    def version(self):
        """
        Returns:
            str?: package version
        """
        return self.get("version")

    @Metadata.property
    def sources(self):
        """
        Returns:
            dict[]?: package sources
        """
        return self.get("sources")

    @Metadata.property
    def contributors(self):
        """
        Returns:
            dict[]?: package contributors
        """
        return self.get("contributors")

    @Metadata.property
    def keywords(self):
        """
        Returns:
            str[]?: package keywords
        """
        return self.get("keywords")

    @Metadata.property
    def image(self):
        """
        Returns:
            str?: package image
        """
        return self.get("image")

    @Metadata.property
    def created(self):
        """
        Returns:
            str?: package created
        """
        return self.get("created")

    @Metadata.property(cache=False, write=False)
    def hashing(self):
        """
        Returns:
            str: package hashing
        """
        return self.__hashing

    @Metadata.property(cache=False, write=False)
    def basepath(self):
        """
        Returns:
            str: package basepath
        """
        return self.__basepath

    @Metadata.property(cache=False, write=False)
    def onerror(self):
        """
        Returns:
            ignore|warn|raise: on error behaviour
        """
        return self.__onerror

    @Metadata.property(cache=False, write=False)
    def trusted(self):
        """
        Returns:
            str: package trusted
        """
        return self.__trusted

    # Resources

    @Metadata.property
    def resources(self):
        """
        Returns:
            Resources[]: package resource
        """
        resources = self.get("resources", [])
        return self.metadata_attach("resources", resources)

    @Metadata.property(cache=False, write=False)
    def resource_names(self):
        """
        Returns:
            str[]: package resource names
        """
        return [resource.name for resource in self.resources]

    def add_resource(self, descriptor):
        """Add new resource to package.

        Parameters:
            descriptor (dict): resource descriptor

        Returns:
            Resource/None: added `Resource` instance or `None` if not added
        """
        self.setdefault("resources", [])
        self["resources"].append(descriptor)
        # metadata_process has wrapped the appended descriptor into a Resource
        return self.resources[-1]

    def get_resource(self, name):
        """Get resource by name.

        Parameters:
            name (str): resource name

        Raises:
            FrictionlessException: if resource is not found

        Returns:
            Resource/None: `Resource` instance or `None` if not found
        """
        for resource in self.resources:
            if resource.name == name:
                return resource
        error = errors.PackageError(note=f'resource "{name}" does not exist')
        raise FrictionlessException(error)

    def has_resource(self, name):
        """Check if a resource is present

        Parameters:
            name (str): schema resource name

        Returns:
            bool: whether there is the resource
        """
        for resource in self.resources:
            if resource.name == name:
                return True
        return False

    def remove_resource(self, name):
        """Remove resource by name.

        Parameters:
            name (str): resource name

        Raises:
            FrictionlessException: if resource is not found

        Returns:
            Resource/None: removed `Resource` instances or `None` if not found
        """
        resource = self.get_resource(name)
        self.resources.remove(resource)
        return resource

    # Expand

    def expand(self):
        """Expand metadata

        It will add default values to the package.
        """
        self.setdefault("resources", self.resources)
        self.setdefault("profile", self.profile)
        for resource in self.resources:
            resource.expand()

    # Infer

    def infer(self, *, stats=False):
        """Infer package's attributes

        Parameters:
            stats? (bool): stream files completely and infer stats
        """

        # General
        self.setdefault("profile", config.DEFAULT_PACKAGE_PROFILE)
        for resource in self.resources:
            resource.infer(stats=stats)

        # Deduplicate names: a repeated name gets its occurrence count
        # appended (e.g. "table", "table2", "table3")
        if len(self.resource_names) != len(set(self.resource_names)):
            seen_names = []
            for index, name in enumerate(self.resource_names):
                count = seen_names.count(name) + 1
                if count > 1:
                    self.resources[index].name = "%s%s" % (name, count)
                seen_names.append(name)

    # Import/Export

    def to_copy(self):
        """Create a copy of the package"""
        descriptor = self.to_dict()
        # Resource's data can be not serializable (generators/functions)
        descriptor.pop("resources", None)
        resources = []
        for resource in self.resources:
            resources.append(resource.to_copy())
        return Package(
            descriptor,
            resources=resources,
            basepath=self.__basepath,
            onerror=self.__onerror,
            trusted=self.__trusted,
        )

    @staticmethod
    def from_bigquery(source, *, dialect=None):
        """Import package from Bigquery

        Parameters:
            source (string): BigQuery `Service` object
            dialect (dict): BigQuery dialect

        Returns:
            Package: package
        """
        storage = system.create_storage("bigquery", source, dialect=dialect)
        return storage.read_package()

    def to_bigquery(self, target, *, dialect=None):
        """Export package to Bigquery

        Parameters:
            target (string): BigQuery `Service` object
            dialect (dict): BigQuery dialect

        Returns:
            BigqueryStorage: storage
        """
        storage = system.create_storage("bigquery", target, dialect=dialect)
        storage.write_package(self.to_copy(), force=True)
        return storage

    @staticmethod
    def from_ckan(source, *, dialect=None):
        """Import package from CKAN

        Parameters:
            source (string): CKAN instance url e.g. "https://demo.ckan.org"
            dialect (dict): CKAN dialect

        Returns:
            Package: package
        """
        storage = system.create_storage("ckan", source, dialect=dialect)
        return storage.read_package()

    def to_ckan(self, target, *, dialect=None):
        """Export package to CKAN

        Parameters:
            target (string): CKAN instance url e.g. "https://demo.ckan.org"
            dialect (dict): CKAN dialect

        Returns:
            CkanStorage: storage
        """
        storage = system.create_storage("ckan", target, dialect=dialect)
        storage.write_package(self.to_copy(), force=True)
        return storage

    @staticmethod
    def from_sql(source, *, dialect=None):
        """Import package from SQL

        Parameters:
            source (any): SQL connection string of engine
            dialect (dict): SQL dialect

        Returns:
            Package: package
        """
        storage = system.create_storage("sql", source, dialect=dialect)
        return storage.read_package()

    def to_sql(self, target, *, dialect=None):
        """Export package to SQL

        Parameters:
            target (any): SQL connection string of engine
            dialect (dict): SQL dialect

        Returns:
            SqlStorage: storage
        """
        storage = system.create_storage("sql", target, dialect=dialect)
        storage.write_package(self.to_copy(), force=True)
        return storage

    @staticmethod
    def from_zip(path, **options):
        """Create a package from ZIP

        Parameters:
            path(str): file path
            **options(dict): resource options
        """
        return Package(descriptor=path, **options)

    def to_zip(self, path, *, encoder_class=None):
        """Save package to a zip

        Parameters:
            path (str): target path
            encoder_class (object): json encoder class

        Raises:
            FrictionlessException: on any error
        """
        try:
            with zipfile.ZipFile(path, "w") as archive:
                package_descriptor = self.to_dict()
                for index, resource in enumerate(self.resources):
                    descriptor = package_descriptor["resources"][index]

                    # Remote data: stays referenced by URL, nothing to bundle
                    if resource.remote:
                        pass

                    # Memory data: non-list data (generators/functions) is
                    # materialized to a CSV file inside the archive.
                    # NOTE: `path` shadows the function parameter from here on;
                    # this is safe because the archive is already open.
                    elif resource.memory:
                        if not isinstance(resource.data, list):
                            path = f"{resource.name}.csv"
                            descriptor["path"] = path
                            del descriptor["data"]
                            with tempfile.NamedTemporaryFile() as file:
                                tgt = Resource(path=file.name, format="csv", trusted=True)
                                resource.write(tgt)
                                archive.write(file.name, path)

                    # Multipart data
                    elif resource.multipart:
                        for path, fullpath in zip(resource.path, resource.fullpath):
                            if os.path.isfile(fullpath):
                                if not helpers.is_safe_path(fullpath):
                                    note = f'Zipping unsafe "{fullpath}" is not supported'
                                    error = errors.PackageError(note=note)
                                    raise FrictionlessException(error)
                                archive.write(fullpath, path)

                    # Local Data
                    else:
                        path = resource.path
                        fullpath = resource.fullpath
                        if os.path.isfile(fullpath):
                            if not helpers.is_safe_path(fullpath):
                                note = f'Zipping unsafe "{fullpath}" is not supported'
                                error = errors.PackageError(note=note)
                                raise FrictionlessException(error)
                            archive.write(fullpath, path)

                # Metadata
                archive.writestr(
                    "datapackage.json",
                    json.dumps(
                        package_descriptor,
                        indent=2,
                        ensure_ascii=False,
                        cls=encoder_class,
                    ),
                )

        except Exception as exception:
            error = errors.PackageError(note=str(exception))
            raise FrictionlessException(error) from exception

    # Metadata

    metadata_duplicate = True
    metadata_Error = errors.PackageError  # type: ignore
    metadata_profile = deepcopy(config.PACKAGE_PROFILE)
    metadata_profile["properties"]["resources"] = {"type": "array"}

    def metadata_process(self):

        # Resources: normalize every entry to a Resource bound to this package
        resources = self.get("resources")
        if isinstance(resources, list):
            for index, resource in enumerate(resources):
                if not isinstance(resource, Resource):
                    if not isinstance(resource, dict):
                        resource = {"name": f"resource{index+1}"}
                    resource = Resource(
                        resource,
                        basepath=self.__basepath,
                        detector=self.__detector,
                        hashing=self.__hashing,
                    )
                    list.__setitem__(resources, index, resource)
                resource.onerror = self.__onerror
                resource.trusted = self.__trusted
                resource.package = self
            if not isinstance(resources, helpers.ControlledList):
                resources = helpers.ControlledList(resources)
                resources.__onchange__(self.metadata_process)
                dict.__setitem__(self, "resources", resources)

    def metadata_validate(self):
        yield from super().metadata_validate()

        # Extensions
        if self.profile == "fiscal-data-package":
            yield from super().metadata_validate(config.FISCAL_PACKAGE_PROFILE)

        # Resources
        for resource in self.resources:
            yield from resource.metadata_errors
| frictionless/package.py | 21,136 | Package representation
API | Usage
-------- | --------
Public | `from frictionless import Package`
This class is one of the cornerstones of the Frictionless framework.
It manages the underlying resources and provides an ability to describe a package.
```python
package = Package(resources=[Resource(path="data/table.csv")])
package.get_resource('table').read_rows() == [
{'id': 1, 'name': 'english'},
{'id': 2, 'name': '中国人'},
]
```
Parameters:
source (any): Source of the package; can be in various forms.
Usually, it's a package descriptor in a form of dict or path
Also, it can be a glob pattern or a resource path
descriptor (dict|str): A resource descriptor provided explicitly.
Keyword arguments will patch this descriptor if provided.
resources? (dict|Resource[]): A list of resource descriptors.
It can be dicts or Resource instances.
id? (str): A property reserved for globally unique identifiers.
Examples of identifiers that are unique include UUIDs and DOIs.
name? (str): A short url-usable (and preferably human-readable) name.
This MUST be lower-case and contain only alphanumeric characters
along with “.”, “_” or “-” characters.
title? (str): A Package title according to the specs
It should a human-oriented title of the resource.
description? (str): A Package description according to the specs
It should a human-oriented description of the resource.
licenses? (dict[]): The license(s) under which the package is provided.
If omitted it's considered the same as the package's licenses.
sources? (dict[]): The raw sources for this data package.
It MUST be an array of Source objects.
Each Source object MUST have a title and
MAY have path and/or email properties.
profile? (str): A string identifying the profile of this descriptor.
For example, `fiscal-data-package`.
homepage? (str): A URL for the home on the web that is related to this package.
For example, github repository or ckan dataset address.
version? (str): A version string identifying the version of the package.
It should conform to the Semantic Versioning requirements and
should follow the Data Package Version pattern.
contributors? (dict[]): The people or organizations who contributed to this package.
It MUST be an array. Each entry is a Contributor and MUST be an object.
A Contributor MUST have a title property and MAY contain
path, email, role and organization properties.
keywords? (str[]): An Array of string keywords to assist users searching.
For example, ['data', 'fiscal']
image? (str): An image to use for this data package.
For example, when showing the package in a listing.
created? (str): The datetime on which this was created.
The datetime must conform to the string formats for RFC3339 datetime,
basepath? (str): A basepath of the resource
The fullpath of the resource is joined `basepath` and /path`
detector? (Detector): File/table detector.
For more information, please check the Detector documentation.
onerror? (ignore|warn|raise): Behaviour if there is an error.
It defaults to 'ignore'. The default mode will ignore all errors
on resource level and they should be handled by the user
being available in Header and Row objects.
trusted? (bool): Don't raise an exception on unsafe paths.
A path provided as a part of the descriptor considered unsafe
if there are path traversing or the path is absolute.
A path provided as `source` or `path` is always trusted.
hashing? (str): a hashing algorithm for resources
It defaults to 'md5'.
Raises:
FrictionlessException: raise any error that occurs during the process
Add new resource to package.
Parameters:
descriptor (dict): resource descriptor
Returns:
Resource/None: added `Resource` instance or `None` if not added
Returns:
str: package basepath
Returns:
dict[]?: package contributors
Returns:
str?: package created
Returns:
str?: package description
Expand metadata
It will add default values to the package.
Import package from Bigquery
Parameters:
source (string): BigQuery `Service` object
dialect (dict): BigQuery dialect
Returns:
Package: package
Import package from CKAN
Parameters:
source (string): CKAN instance url e.g. "https://demo.ckan.org"
dialect (dict): CKAN dialect
Returns:
Package: package
Import package from SQL
Parameters:
source (any): SQL connection string of engine
dialect (dict): SQL dialect
Returns:
Package: package
Create a package from ZIP
Parameters:
path(str): file path
**options(dict): resouce options
Get resource by name.
Parameters:
name (str): resource name
Raises:
FrictionlessException: if resource is not found
Returns:
Resource/None: `Resource` instance or `None` if not found
Check if a resource is present
Parameters:
name (str): schema resource name
Returns:
bool: whether there is the resource
Returns:
str: package hashing
Returns:
str?: package homepage
Returns:
str?: package id
Returns:
str?: package image
Infer package's attributes
Parameters:
stats? (bool): stream files completely and infer stats
Returns:
str[]?: package keywords
Returns:
dict?: package licenses
Returns:
str?: package name
Returns:
ignore|warn|raise: on error behaviour
Returns:
str: package profile
Remove resource by name.
Parameters:
name (str): resource name
Raises:
FrictionlessException: if resource is not found
Returns:
Resource/None: removed `Resource` instances or `None` if not found
Returns:
str[]: package resource names
Returns:
Resources[]: package resource
Returns:
dict[]?: package sources
Returns:
str?: package title
Export package to Bigquery
Parameters:
target (string): BigQuery `Service` object
dialect (dict): BigQuery dialect
Returns:
BigqueryStorage: storage
Export package to CKAN
Parameters:
target (string): CKAN instance url e.g. "https://demo.ckan.org"
dialect (dict): CKAN dialect
Returns:
CkanStorage: storage
Create a copy of the package
Export package to SQL
Parameters:
target (any): SQL connection string of engine
dialect (dict): SQL dialect
Returns:
SqlStorage: storage
Save package to a zip
Parameters:
path (str): target path
encoder_class (object): json encoder class
Raises:
FrictionlessException: on any error
Returns:
str: package trusted
Returns:
str?: package version
Spec Extra Handle source Handle pathlib Handle trusted Handle zip Set attributes Resources Expand Infer General Deduplicate names Import/Export Resource's data can be not serializable (generators/functions) Remote data Memory data Multipart data Local Data Metadata Metadata type: ignore Resources Extensions Resources | 7,033 | en | 0.694507 |
#
# PySNMP MIB module INT-SERV-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INT-SERV-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:18:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Counter32, IpAddress, ModuleIdentity, Unsigned32, MibIdentifier, NotificationType, Integer32, TimeTicks, Bits, mib_2, iso, Gauge32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Counter32", "IpAddress", "ModuleIdentity", "Unsigned32", "MibIdentifier", "NotificationType", "Integer32", "TimeTicks", "Bits", "mib-2", "iso", "Gauge32", "ObjectIdentity")
DisplayString, TruthValue, RowStatus, TestAndIncr, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "RowStatus", "TestAndIncr", "TextualConvention")
intSrv = ModuleIdentity((1, 3, 6, 1, 2, 1, 52))
if mibBuilder.loadTexts: intSrv.setLastUpdated('9710030642Z')
if mibBuilder.loadTexts: intSrv.setOrganization('IETF Integrated Services Working Group')
if mibBuilder.loadTexts: intSrv.setContactInfo(' Fred Baker Postal: Cisco Systems 519 Lado Drive Santa Barbara, California 93111 Tel: +1 805 681 0115 E-Mail: fred@cisco.com John Krawczyk Postal: ArrowPoint Communications 235 Littleton Road Westford, Massachusetts 01886 Tel: +1 508 692 5875 E-Mail: jjk@tiac.net')
if mibBuilder.loadTexts: intSrv.setDescription('The MIB module to describe the Integrated Services Protocol')
intSrvObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 1))
intSrvGenObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 2))
intSrvNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 3))
intSrvConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4))
class SessionNumber(TextualConvention, Integer32):
    """Textual convention: opaque identifier (0..2147483647) for a session or saved PATH/RESV state."""
    description = 'The Session Number convention is used for numbers identifying sessions or saved PATH or RESV information. It is a number in the range returned by a TestAndIncr variable, having no protocol meaning whatsoever but serving instead as simple identifier. The alternative was a very complex instance or instance object that became unwieldy.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class Protocol(TextualConvention, Integer32):
    """Textual convention: IP protocol number (1..255), e.g. 6 = TCP, 17 = UDP."""
    description = 'The value of the IP Protocol field of an IP Datagram Header. This identifies the protocol layer above IP. For example, the value 6 is used for TCP and the value 17 is used for UDP. The values of this field are defined in the As- signed Numbers RFC.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class SessionType(TextualConvention, Integer32):
    """Textual convention: RSVP Session object C-Type (1..255); determines address/port octet lengths."""
    description = "The value of the C-Type field of a Session ob- ject, as defined in the RSVP specification. This value determines the lengths of octet strings and use of certain objects such as the 'port' variables. If the C-Type calls for an IP6 address, one would expect all source, des- tination, and next/previous hop addresses to be 16 bytes long, and for the ports to be UDP/TCP port numbers, for example."
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class Port(TextualConvention, OctetString):
    """Textual convention: UDP/TCP port or generalized port identifier as a 2..4 byte octet string."""
    description = 'The value of the UDP or TCP Source or Destina- tion Port field, a virtual destination port or generalized port identifier used with the IPSEC Authentication Header or Encapsulating Security Payload, or other session discriminator. If it is not used, the value should be of length 0. This pair, when coupled with the IP Addresses of the source and destination system and the IP protocol field, uniquely identifies a data stream.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(2, 4)
class MessageSize(TextualConvention, Integer32):
    """Textual convention: message size in bytes (0..2147483647)."""
    description = 'The size of a message in bytes. This is used to specify the minimum and maximum size of a message along an integrated services route.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BitRate(TextualConvention, Integer32):
    """Textual convention: data rate in bits/second (0..2147483647)."""
    description = 'The rate, in bits/second, that data may move in the context. Applicable contexts minimally include the speed of an interface or virtual circuit, the data rate of a (potentially aggre- gated) data flow, or the data rate to be allo- cated for use by a flow.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BurstSize(TextualConvention, Integer32):
    """Textual convention: burst size in octets of IP data, headers included (0..2147483647)."""
    description = 'The number of octets of IP Data, including IP Headers, that a stream may send without concern for policing.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class QosService(TextualConvention, Integer32):
    """Textual convention: QoS service class - bestEffort(1), guaranteedDelay(2), controlledLoad(5)."""
    description = 'The class of service in use by a flow.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 5))
    namedValues = NamedValues(("bestEffort", 1), ("guaranteedDelay", 2), ("controlledLoad", 5))
# ---------------------------------------------------------------------------
# intSrvIfAttribTable (52.1.1): per-interface reservable resource attributes,
# indexed by ifIndex. Columns 1-6: allocated bandwidth, bandwidth ceiling,
# buffer usage, active flow count, propagation delay and the row status.
# ---------------------------------------------------------------------------
intSrvIfAttribTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 1), )
if mibBuilder.loadTexts: intSrvIfAttribTable.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribTable.setDescription("The reservable attributes of the system's in- terfaces.")
intSrvIfAttribEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: intSrvIfAttribEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribEntry.setDescription('The reservable attributes of a given inter- face.')
intSrvIfAttribAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 1), BitRate()).setUnits('Bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setDescription('The number of bits/second currently allocated to reserved sessions on the interface.')
intSrvIfAttribMaxAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 2), BitRate()).setUnits('Bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setDescription('The maximum number of bits/second that may be allocated to reserved sessions on the inter- face.')
intSrvIfAttribAllocatedBuffer = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 3), BurstSize()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setDescription('The amount of buffer space required to hold the simultaneous burst of all reserved flows on the interface.')
intSrvIfAttribFlows = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribFlows.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribFlows.setDescription('The number of reserved flows currently active on this interface. A flow can be created ei- ther from a reservation protocol (such as RSVP or ST-II) or via configuration information.')
intSrvIfAttribPropagationDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 5), Integer32()).setUnits('microseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setDescription('The amount of propagation delay that this in- terface introduces in addition to that intro- diced by bit propagation delays.')
intSrvIfAttribStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribStatus.setDescription("'active' on interfaces that are configured for RSVP.")
# ---------------------------------------------------------------------------
# intSrvFlowTable (52.1.2): one row per reserved flow per interface, indexed
# by the opaque intSrvFlowNumber. Columns describe the flow's addressing
# (4..16 byte octet strings cover both IPv4 and IPv6), traffic parameters
# (rate/burst/MTU), policing counters and row status.
# ---------------------------------------------------------------------------
intSrvFlowTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 2), )
if mibBuilder.loadTexts: intSrvFlowTable.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowTable.setDescription("Information describing the reserved flows us- ing the system's interfaces.")
intSrvFlowEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 2, 1), ).setIndexNames((0, "INT-SERV-MIB", "intSrvFlowNumber"))
if mibBuilder.loadTexts: intSrvFlowEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowEntry.setDescription('Information describing the use of a given in- terface by a given flow. The counter intSrvFlowPoliced starts counting at the in- stallation of the flow.')
intSrvFlowNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 1), SessionNumber())
if mibBuilder.loadTexts: intSrvFlowNumber.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNumber.setDescription('The number of this flow. This is for SNMP In- dexing purposes only and has no relation to any protocol value.')
intSrvFlowType = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 2), SessionType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowType.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowType.setDescription('The type of session (IP4, IP6, IP6 with flow information, etc).')
intSrvFlowOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("rsvp", 2), ("management", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOwner.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOwner.setDescription('The process that installed this flow in the queue policy database.')
intSrvFlowDestAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddr.setDescription("The destination address used by all senders in this session. This object may not be changed when the value of the RowStatus object is 'ac- tive'.")
intSrvFlowSenderAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setDescription("The source address of the sender selected by this reservation. The value of all zeroes in- dicates 'all senders'. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setDescription("The length of the destination address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowSenderAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setDescription("The length of the sender's address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 8), Protocol()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowProtocol.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowProtocol.setDescription("The IP Protocol used by a session. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 9), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestPort.setDescription("The UDP or TCP port number used as a destina- tion port for all senders in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol, is 50 (ESP) or 51 (AH), this represents a virtual destination port number. A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 10), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPort.setDescription("The UDP or TCP port number used as a source port for this sender in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol is 50 (ESP) or 51 (AH), this represents a generalized port identifier (GPI). A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowFlowId.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowFlowId.setDescription('The flow ID that this sender is using, if this is an IPv6 session.')
intSrvFlowInterface = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 12), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowInterface.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowInterface.setDescription('The ifIndex value of the interface on which this reservation exists.')
intSrvFlowIfAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowIfAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowIfAddr.setDescription('The IP Address on the ifEntry on which this reservation exists. This is present primarily to support those interfaces which layer multi- ple IP Addresses on the interface.')
intSrvFlowRate = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 14), BitRate()).setUnits('bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowRate.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowRate.setDescription("The Reserved Rate of the sender's data stream. If this is a Controlled Load service flow, this rate is derived from the Tspec rate parameter (r). If this is a Guaranteed service flow, this rate is derived from the Rspec clearing rate parameter (R).")
intSrvFlowBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 15), BurstSize()).setUnits('bytes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowBurst.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBurst.setDescription("The size of the largest burst expected from the sender at a time. If this is less than the sender's advertised burst size, the receiver is asking the network to provide flow pacing beyond what would be provided under normal circumstances. Such pac- ing is at the network's option.")
intSrvFlowWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 16), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowWeight.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowWeight.setDescription('The weight used to prioritize the traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of weighting procedures.')
intSrvFlowQueue = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 17), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowQueue.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowQueue.setDescription('The number of the queue used by this traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of queue identifiers.')
intSrvFlowMinTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 18), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMinTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMinTU.setDescription('The minimum message size for this flow. The policing algorithm will treat smaller messages as though they are this size.')
intSrvFlowMaxTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 19), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMaxTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMaxTU.setDescription('The maximum datagram size for this flow that will conform to the traffic specification. This value cannot exceed the MTU of the interface.')
intSrvFlowBestEffort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowBestEffort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBestEffort.setDescription('The number of packets that were remanded to best effort service.')
intSrvFlowPoliced = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowPoliced.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPoliced.setDescription("The number of packets policed since the incep- tion of the flow's service.")
intSrvFlowDiscard = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 22), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDiscard.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDiscard.setDescription("If 'true', the flow is to incur loss when traffic is policed. If 'false', policed traff- ic is treated as best effort traffic.")
intSrvFlowService = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 23), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowService.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowService.setDescription('The QoS service being applied to this flow.')
intSrvFlowOrder = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOrder.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOrder.setDescription('In the event of ambiguity, the order in which the classifier should make its comparisons. The row with intSrvFlowOrder=0 is tried first, and comparisons proceed in the order of in- creasing value. Non-serial implementations of the classifier should emulate this behavior.')
intSrvFlowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 25), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowStatus.setDescription("'active' for all active flows. This object may be used to install static classifier infor- mation, delete classifier information, or au- thorize such.")
# intSrvFlowNewIndex (52.2.1): TestAndIncr scalar handing out fresh
# intSrvFlowNumber values for row creation.
intSrvFlowNewIndex = MibScalar((1, 3, 6, 1, 2, 1, 52, 2, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: intSrvFlowNewIndex.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNewIndex.setDescription("This object is used to assign values to intSrvFlowNumber as described in 'Textual Con- ventions for SNMPv2'. The network manager reads the object, and then writes the value back in the SET that creates a new instance of intSrvFlowEntry. If the SET fails with the code 'inconsistentValue', then the process must be repeated; If the SET succeeds, then the ob- ject is incremented, and the new instance is created according to the manager's directions.")
# ---------------------------------------------------------------------------
# Conformance section (52.4): object groups, the module compliance statement,
# and the final symbol export back into the MIB builder. The version checks
# below are pysmi-generated guards: setStatus only returns the object on
# pysnmp > 4.4.0, so the rebind is skipped on older releases.
# ---------------------------------------------------------------------------
intSrvGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 1))
intSrvCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 2))
intSrvCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 52, 4, 2, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribGroup"), ("INT-SERV-MIB", "intSrvFlowsGroup"), ("INT-SERV-MIB", "intSrvGenObjectsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    intSrvCompliance = intSrvCompliance.setStatus('current')
if mibBuilder.loadTexts: intSrvCompliance.setDescription('The compliance statement ')
intSrvIfAttribGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribMaxAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribAllocatedBuffer"), ("INT-SERV-MIB", "intSrvIfAttribFlows"), ("INT-SERV-MIB", "intSrvIfAttribPropagationDelay"), ("INT-SERV-MIB", "intSrvIfAttribStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    intSrvIfAttribGroup = intSrvIfAttribGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvFlowsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 2)).setObjects(("INT-SERV-MIB", "intSrvFlowType"), ("INT-SERV-MIB", "intSrvFlowOwner"), ("INT-SERV-MIB", "intSrvFlowDestAddr"), ("INT-SERV-MIB", "intSrvFlowSenderAddr"), ("INT-SERV-MIB", "intSrvFlowDestAddrLength"), ("INT-SERV-MIB", "intSrvFlowSenderAddrLength"), ("INT-SERV-MIB", "intSrvFlowProtocol"), ("INT-SERV-MIB", "intSrvFlowDestPort"), ("INT-SERV-MIB", "intSrvFlowPort"), ("INT-SERV-MIB", "intSrvFlowFlowId"), ("INT-SERV-MIB", "intSrvFlowInterface"), ("INT-SERV-MIB", "intSrvFlowBestEffort"), ("INT-SERV-MIB", "intSrvFlowRate"), ("INT-SERV-MIB", "intSrvFlowBurst"), ("INT-SERV-MIB", "intSrvFlowWeight"), ("INT-SERV-MIB", "intSrvFlowQueue"), ("INT-SERV-MIB", "intSrvFlowMinTU"), ("INT-SERV-MIB", "intSrvFlowMaxTU"), ("INT-SERV-MIB", "intSrvFlowDiscard"), ("INT-SERV-MIB", "intSrvFlowPoliced"), ("INT-SERV-MIB", "intSrvFlowService"), ("INT-SERV-MIB", "intSrvFlowIfAddr"), ("INT-SERV-MIB", "intSrvFlowOrder"), ("INT-SERV-MIB", "intSrvFlowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    intSrvFlowsGroup = intSrvFlowsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvGenObjectsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 3)).setObjects(("INT-SERV-MIB", "intSrvFlowNewIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    intSrvGenObjectsGroup = intSrvGenObjectsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvGenObjectsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
# Export every defined symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("INT-SERV-MIB", BitRate=BitRate, intSrvIfAttribAllocatedBits=intSrvIfAttribAllocatedBits, intSrvFlowMaxTU=intSrvFlowMaxTU, intSrvFlowOrder=intSrvFlowOrder, PYSNMP_MODULE_ID=intSrv, Protocol=Protocol, intSrvIfAttribAllocatedBuffer=intSrvIfAttribAllocatedBuffer, intSrvFlowDestAddr=intSrvFlowDestAddr, intSrvFlowBurst=intSrvFlowBurst, intSrvIfAttribFlows=intSrvIfAttribFlows, intSrvFlowTable=intSrvFlowTable, intSrvFlowEntry=intSrvFlowEntry, intSrvFlowSenderAddrLength=intSrvFlowSenderAddrLength, intSrvIfAttribGroup=intSrvIfAttribGroup, intSrvFlowInterface=intSrvFlowInterface, intSrvFlowDestAddrLength=intSrvFlowDestAddrLength, intSrvFlowDestPort=intSrvFlowDestPort, BurstSize=BurstSize, intSrvFlowStatus=intSrvFlowStatus, intSrvIfAttribMaxAllocatedBits=intSrvIfAttribMaxAllocatedBits, intSrvFlowNewIndex=intSrvFlowNewIndex, intSrvGroups=intSrvGroups, MessageSize=MessageSize, intSrvFlowRate=intSrvFlowRate, intSrvFlowPort=intSrvFlowPort, intSrvFlowIfAddr=intSrvFlowIfAddr, SessionType=SessionType, intSrvIfAttribTable=intSrvIfAttribTable, intSrvIfAttribPropagationDelay=intSrvIfAttribPropagationDelay, intSrvFlowService=intSrvFlowService, intSrvFlowsGroup=intSrvFlowsGroup, intSrvFlowWeight=intSrvFlowWeight, intSrvFlowMinTU=intSrvFlowMinTU, intSrvFlowProtocol=intSrvFlowProtocol, intSrvFlowOwner=intSrvFlowOwner, intSrvIfAttribEntry=intSrvIfAttribEntry, intSrvFlowSenderAddr=intSrvFlowSenderAddr, QosService=QosService, SessionNumber=SessionNumber, intSrvObjects=intSrvObjects, intSrvGenObjects=intSrvGenObjects, intSrvFlowFlowId=intSrvFlowFlowId, intSrvCompliances=intSrvCompliances, intSrv=intSrv, intSrvFlowNumber=intSrvFlowNumber, intSrvNotifications=intSrvNotifications, intSrvFlowQueue=intSrvFlowQueue, intSrvFlowBestEffort=intSrvFlowBestEffort, intSrvFlowType=intSrvFlowType, intSrvCompliance=intSrvCompliance, Port=Port, intSrvIfAttribStatus=intSrvIfAttribStatus, intSrvFlowPoliced=intSrvFlowPoliced, intSrvFlowDiscard=intSrvFlowDiscard, 
intSrvGenObjectsGroup=intSrvGenObjectsGroup, intSrvConformance=intSrvConformance)
| pysnmp-with-texts/INT-SERV-MIB.py | 25,716 | PySNMP MIB module INT-SERV-MIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INT-SERV-MIB Produced by pysmi-0.3.4 at Wed May 1 12:18:45 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 318 | en | 0.340998 |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from six import iteritems, string_types
from elasticsearch7.helpers import scan
from elasticsearch7.exceptions import TransportError
from .query import Q, Bool
from .aggs import A, AggBase
from .utils import DslBase, AttrDict
from .response import Response, Hit
from .connections import get_connection
from .exceptions import IllegalOperation
class QueryProxy(object):
    """
    Callable wrapper around a DSL query object.

    Calling the proxy merges a new query into the wrapped one (this is what
    backs ``Search.query(...)`` / ``Search.post_filter(...)``), while plain
    attribute access falls through to the wrapped query itself.
    """

    def __init__(self, search, attr_name):
        self._search = search
        self._proxied = None
        self._attr_name = attr_name

    def __nonzero__(self):
        return self._proxied is not None

    __bool__ = __nonzero__

    def __call__(self, *args, **kwargs):
        # Work on a clone so the original search object stays unmodified.
        clone = self._search._clone()

        # Fetch the proxy that lives on the clone - self._proxied would still
        # point at the pre-clone state.
        target = getattr(clone, self._attr_name)
        new_query = Q(*args, **kwargs)
        if target._proxied is None:
            target._proxied = new_query
        else:
            target._proxied &= new_query

        # return the clone so calls remain chainable
        return clone

    def __getattr__(self, attr_name):
        return getattr(self._proxied, attr_name)

    def __setattr__(self, attr_name, value):
        if not attr_name.startswith("_"):
            # re-wrap before mutating so a query shared with another Search
            # instance is never modified in place
            self._proxied = Q(self._proxied.to_dict())
            setattr(self._proxied, attr_name, value)
        super(QueryProxy, self).__setattr__(attr_name, value)

    def __getstate__(self):
        return self._search, self._proxied, self._attr_name

    def __setstate__(self, state):
        self._search, self._proxied, self._attr_name = state
class ProxyDescriptor(object):
    """
    Descriptor exposing a ``QueryProxy`` as a plain attribute so queries and
    filters can be assigned directly::

        s = Search()
        s.query = Q(...)
    """

    def __init__(self, name):
        # e.g. name="query" -> instance attribute "_query_proxy"
        self._attr_name = "_%s_proxy" % name

    def __get__(self, instance, owner):
        return getattr(instance, self._attr_name)

    def __set__(self, instance, value):
        # replace the proxied query on the instance's proxy object
        getattr(instance, self._attr_name)._proxied = Q(value)
class AggsProxy(AggBase, DslBase):
    """Proxy object exposing the top-level aggregations of a ``Search``."""

    name = "aggs"

    def __init__(self, search):
        self._base = self
        self._search = search
        self._params = {"aggs": {}}

    def to_dict(self):
        # strip the outer {"aggs": ...} wrapper that DslBase serialization adds
        serialized = super(AggsProxy, self).to_dict()
        return serialized.get("aggs", {})
class Request(object):
    """
    Base class holding the state shared by search-style requests: the
    connection alias, target indices, doc_type information and extra
    body/query parameters.

    All state-modifying methods return a fresh copy (via ``_clone``) so
    instances can be shared and chained safely.
    """

    def __init__(self, using="default", index=None, doc_type=None, extra=None):
        # connection alias (or client instance) used to execute the request
        self._using = using

        # normalize ``index`` to either None or a list of index names
        self._index = None
        if isinstance(index, (tuple, list)):
            self._index = list(index)
        elif index:
            self._index = [index]

        # _doc_type holds doc types (strings or Document classes);
        # _doc_type_map maps a doc type name to a deserialization callback
        self._doc_type = []
        self._doc_type_map = {}
        if isinstance(doc_type, (tuple, list)):
            self._doc_type.extend(doc_type)
        elif isinstance(doc_type, collections_abc.Mapping):
            self._doc_type.extend(doc_type.keys())
            self._doc_type_map.update(doc_type)
        elif doc_type:
            self._doc_type.append(doc_type)

        self._params = {}
        self._extra = extra or {}

    def __eq__(self, other):
        return (
            isinstance(other, Request)
            and other._params == self._params
            and other._index == self._index
            and other._doc_type == self._doc_type
            and other.to_dict() == self.to_dict()
        )

    def __ne__(self, other):
        # Explicit __ne__ keeps ``!=`` consistent with __eq__ under Python 2
        # (still supported by this module via ``six``); Python 3 would derive
        # it automatically.
        return not self == other

    def __copy__(self):
        return self._clone()

    def params(self, **kwargs):
        """
        Specify query params to be used when executing the search. All the
        keyword arguments will override the current values. See
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
        for all available parameters.

        Example::

            s = Search()
            s = s.params(routing='user-1', preference='local')
        """
        s = self._clone()
        s._params.update(kwargs)
        return s

    def index(self, *index):
        """
        Set the index for the search. If called empty it will remove all information.

        Example:

            s = Search()
            s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')
            s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
        """
        # .index() resets
        s = self._clone()
        if not index:
            s._index = None
        else:
            indexes = []
            for i in index:
                if isinstance(i, string_types):
                    indexes.append(i)
                elif isinstance(i, list):
                    indexes += i
                elif isinstance(i, tuple):
                    indexes += list(i)
                # NOTE: values of any other type are silently skipped

            s._index = (self._index or []) + indexes

        return s

    def _resolve_field(self, path):
        # Return the field at ``path`` from the first doc type whose Index
        # can resolve it; None if no doc type knows the field.
        for dt in self._doc_type:
            if not hasattr(dt, "_index"):
                continue
            field = dt._index.resolve_field(path)
            if field is not None:
                return field

    def _resolve_nested(self, hit, parent_class=None):
        # Determine the document class for a nested inner hit by walking the
        # ``_nested`` chain to reconstruct the full dotted field path.
        doc_class = Hit

        nested_path = []
        nesting = hit["_nested"]
        while nesting and "field" in nesting:
            nested_path.append(nesting["field"])
            nesting = nesting.get("_nested")
        nested_path = ".".join(nested_path)

        if hasattr(parent_class, "_index"):
            nested_field = parent_class._index.resolve_field(nested_path)
        else:
            nested_field = self._resolve_field(nested_path)

        if nested_field is not None:
            return nested_field._doc_class

        return doc_class

    def _get_result(self, hit, parent_class=None):
        # Deserialize a raw hit into the appropriate document class, falling
        # back to the generic ``Hit`` wrapper.
        doc_class = Hit
        dt = hit.get("_type")

        if "_nested" in hit:
            doc_class = self._resolve_nested(hit, parent_class)

        elif dt in self._doc_type_map:
            # explicit callback registered via doc_type(name=callback)
            doc_class = self._doc_type_map[dt]

        else:
            for doc_type in self._doc_type:
                if hasattr(doc_type, "_matches") and doc_type._matches(hit):
                    doc_class = doc_type
                    break

        # wrap inner hits in Response objects as well
        for t in hit.get("inner_hits", ()):
            hit["inner_hits"][t] = Response(
                self, hit["inner_hits"][t], doc_class=doc_class
            )

        callback = getattr(doc_class, "from_es", doc_class)
        return callback(hit)

    def doc_type(self, *doc_type, **kwargs):
        """
        Set the type to search through. You can supply a single value or
        multiple. Values can be strings or subclasses of ``Document``.

        You can also pass in any keyword arguments, mapping a doc_type to a
        callback that should be used instead of the Hit class.

        If no doc_type is supplied any information stored on the instance will
        be erased.

        Example:

            s = Search().doc_type('product', 'store', User, custom=my_callback)
        """
        # .doc_type() resets
        s = self._clone()
        if not doc_type and not kwargs:
            s._doc_type = []
            s._doc_type_map = {}
        else:
            s._doc_type.extend(doc_type)
            s._doc_type.extend(kwargs.keys())
            s._doc_type_map.update(kwargs)
        return s

    def using(self, client):
        """
        Associate the search request with an elasticsearch client. A fresh copy
        will be returned with current instance remaining unchanged.

        :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
            an alias to look up in ``elasticsearch_dsl.connections``
        """
        s = self._clone()
        s._using = client
        return s

    def extra(self, **kwargs):
        """
        Add extra keys to the request body. Mostly here for backwards
        compatibility.
        """
        s = self._clone()
        if "from_" in kwargs:
            # ``from`` is a Python keyword, so callers pass ``from_``
            kwargs["from"] = kwargs.pop("from_")
        s._extra.update(kwargs)
        return s

    def _clone(self):
        # Shallow copy of the request state; subclasses extend this.
        s = self.__class__(
            using=self._using, index=self._index, doc_type=self._doc_type
        )
        s._doc_type_map = self._doc_type_map.copy()
        s._extra = self._extra.copy()
        s._params = self._params.copy()
        return s
class Search(Request):
    """
    Chainable DSL search request; state-modifying methods return clones.
    """

    # expose the query/post_filter QueryProxy objects as assignable attributes
    query = ProxyDescriptor("query")
    post_filter = ProxyDescriptor("post_filter")
def __init__(self, **kwargs):
"""
Search request to elasticsearch.
:arg using: `Elasticsearch` instance to use
:arg index: limit the search to index
:arg doc_type: only query this type.
All the parameters supplied (or omitted) at creation type can be later
overridden by methods (`using`, `index` and `doc_type` respectively).
"""
super(Search, self).__init__(**kwargs)
self.aggs = AggsProxy(self)
self._sort = []
self._source = None
self._highlight = {}
self._highlight_opts = {}
self._suggest = {}
self._script_fields = {}
self._response_class = Response
self._query_proxy = QueryProxy(self, "query")
self._post_filter_proxy = QueryProxy(self, "post_filter")
def filter(self, *args, **kwargs):
return self.query(Bool(filter=[Q(*args, **kwargs)]))
def exclude(self, *args, **kwargs):
return self.query(Bool(filter=[~Q(*args, **kwargs)]))
def __iter__(self):
"""
Iterate over the hits.
"""
return iter(self.execute())
def __getitem__(self, n):
"""
Support slicing the `Search` instance for pagination.
Slicing equates to the from/size parameters. E.g.::
s = Search().query(...)[0:25]
is equivalent to::
s = Search().query(...).extra(from_=0, size=25)
"""
s = self._clone()
if isinstance(n, slice):
# If negative slicing, abort.
if n.start and n.start < 0 or n.stop and n.stop < 0:
raise ValueError("Search does not support negative slicing.")
# Elasticsearch won't get all results so we default to size: 10 if
# stop not given.
s._extra["from"] = n.start or 0
s._extra["size"] = max(
0, n.stop - (n.start or 0) if n.stop is not None else 10
)
return s
else: # This is an index lookup, equivalent to slicing by [n:n+1].
# If negative index, abort.
if n < 0:
raise ValueError("Search does not support negative indexing.")
s._extra["from"] = n
s._extra["size"] = 1
return s
@classmethod
def from_dict(cls, d):
"""
Construct a new `Search` instance from a raw dict containing the search
body. Useful when migrating from raw dictionaries.
Example::
s = Search.from_dict({
"query": {
"bool": {
"must": [...]
}
},
"aggs": {...}
})
s = s.filter('term', published=True)
"""
s = cls()
s.update_from_dict(d)
return s
def _clone(self):
"""
Return a clone of the current search request. Performs a shallow copy
of all the underlying objects. Used internally by most state modifying
APIs.
"""
s = super(Search, self)._clone()
s._response_class = self._response_class
s._sort = self._sort[:]
s._source = copy.copy(self._source) if self._source is not None else None
s._highlight = self._highlight.copy()
s._highlight_opts = self._highlight_opts.copy()
s._suggest = self._suggest.copy()
s._script_fields = self._script_fields.copy()
for x in ("query", "post_filter"):
getattr(s, x)._proxied = getattr(self, x)._proxied
# copy top-level bucket definitions
if self.aggs._params.get("aggs"):
s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
return s
def response_class(self, cls):
"""
Override the default wrapper used for the response.
"""
s = self._clone()
s._response_class = cls
return s
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if "query" in d:
self.query._proxied = Q(d.pop("query"))
if "post_filter" in d:
self.post_filter._proxied = Q(d.pop("post_filter"))
aggs = d.pop("aggs", d.pop("aggregations", {}))
if aggs:
self.aggs._params = {
"aggs": {name: A(value) for (name, value) in iteritems(aggs)}
}
if "sort" in d:
self._sort = d.pop("sort")
if "_source" in d:
self._source = d.pop("_source")
if "highlight" in d:
high = d.pop("highlight").copy()
self._highlight = high.pop("fields")
self._highlight_opts = high
if "suggest" in d:
self._suggest = d.pop("suggest")
if "text" in self._suggest:
text = self._suggest.pop("text")
for s in self._suggest.values():
s.setdefault("text", text)
if "script_fields" in d:
self._script_fields = d.pop("script_fields")
self._extra.update(d)
return self
def script_fields(self, **kwargs):
"""
Define script fields to be calculated on hits. See
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html
for more details.
Example::
s = Search()
s = s.script_fields(times_two="doc['field'].value * 2")
s = s.script_fields(
times_three={
'script': {
'inline': "doc['field'].value * params.n",
'params': {'n': 3}
}
}
)
"""
s = self._clone()
for name in kwargs:
if isinstance(kwargs[name], string_types):
kwargs[name] = {"script": kwargs[name]}
s._script_fields.update(kwargs)
return s
def source(self, fields=None, **kwargs):
"""
Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'includes' and/or
'excludes' the fields will be either included or excluded appropriately.
Calling this multiple times with the same named parameter will override the
previous values with the new ones.
Example::
s = Search()
s = s.source(includes=['obj1.*'], excludes=["*.description"])
s = Search()
s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
"""
s = self._clone()
if fields and kwargs:
raise ValueError("You cannot specify fields and kwargs at the same time.")
if fields is not None:
s._source = fields
return s
if kwargs and not isinstance(s._source, dict):
s._source = {}
for key, value in kwargs.items():
if value is None:
try:
del s._source[key]
except KeyError:
pass
else:
s._source[key] = value
return s
def sort(self, *keys):
"""
Add sorting information to the search request. If called without
arguments it will remove all sort requirements. Otherwise it will
replace them. Acceptable arguments are::
'some.field'
'-some.other.field'
{'different.field': {'any': 'dict'}}
so for example::
s = Search().sort(
'category',
'-title',
{"price" : {"order" : "asc", "mode" : "avg"}}
)
will sort by ``category``, ``title`` (in descending order) and
``price`` in ascending order using the ``avg`` mode.
The API returns a copy of the Search object and can thus be chained.
"""
s = self._clone()
s._sort = []
for k in keys:
if isinstance(k, string_types) and k.startswith("-"):
if k[1:] == "_score":
raise IllegalOperation("Sorting by `-_score` is not allowed.")
k = {k[1:]: {"order": "desc"}}
s._sort.append(k)
return s
def highlight_options(self, **kwargs):
"""
Update the global highlighting options used for this request. For
example::
s = Search()
s = s.highlight_options(order='score')
"""
s = self._clone()
s._highlight_opts.update(kwargs)
return s
def highlight(self, *fields, **kwargs):
"""
Request highlighting of some fields. All keyword arguments passed in will be
used as parameters for all the fields in the ``fields`` parameter. Example::
Search().highlight('title', 'body', fragment_size=50)
will produce the equivalent of::
{
"highlight": {
"fields": {
"body": {"fragment_size": 50},
"title": {"fragment_size": 50}
}
}
}
If you want to have different options for different fields
you can call ``highlight`` twice::
Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
which will produce::
{
"highlight": {
"fields": {
"body": {"fragment_size": 100},
"title": {"fragment_size": 50}
}
}
}
"""
s = self._clone()
for f in fields:
s._highlight[f] = kwargs
return s
def suggest(self, name, text, **kwargs):
"""
Add a suggestions request to the search.
:arg name: name of the suggestion
:arg text: text to suggest on
All keyword arguments will be added to the suggestions body. For example::
s = Search()
s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
"""
s = self._clone()
s._suggest[name] = {"text": text}
s._suggest[name].update(kwargs)
return s
def to_dict(self, count=False, **kwargs):
"""
Serialize the search into the dictionary that will be sent over as the
request's body.
:arg count: a flag to specify if we are interested in a body for count -
no aggregations, no pagination bounds etc.
All additional keyword arguments will be included into the dictionary.
"""
d = {}
if self.query:
d["query"] = self.query.to_dict()
# count request doesn't care for sorting and other things
if not count:
if self.post_filter:
d["post_filter"] = self.post_filter.to_dict()
if self.aggs.aggs:
d.update(self.aggs.to_dict())
if self._sort:
d["sort"] = self._sort
d.update(self._extra)
if self._source not in (None, {}):
d["_source"] = self._source
if self._highlight:
d["highlight"] = {"fields": self._highlight}
d["highlight"].update(self._highlight_opts)
if self._suggest:
d["suggest"] = self._suggest
if self._script_fields:
d["script_fields"] = self._script_fields
d.update(kwargs)
return d
def count(self):
"""
Return the number of hits matching the query and filters. Note that
only the actual number is returned.
"""
if hasattr(self, "_response") and self._response.hits.total.relation == "eq":
return self._response.hits.total.value
es = get_connection(self._using)
d = self.to_dict(count=True)
# TODO: failed shards detection
return es.count(index=self._index, body=d, **self._params)["count"]
def execute(self, ignore_cache=False):
"""
Execute the search and return an instance of ``Response`` wrapping all
the data.
:arg ignore_cache: if set to ``True``, consecutive calls will hit
ES, while cached result will be ignored. Defaults to `False`
"""
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
self._response = self._response_class(
self, es.search(index=self._index, body=self.to_dict(), **self._params)
)
return self._response
def scan(self):
"""
Turn the search into a scan search and return a generator that will
iterate over all the documents matching the query.
Use ``params`` method to specify any additional arguments you with to
pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
"""
es = get_connection(self._using)
for hit in scan(es, query=self.to_dict(), index=self._index, **self._params):
yield self._get_result(hit)
def delete(self):
"""
delete() executes the query by delegating to delete_by_query()
"""
es = get_connection(self._using)
return AttrDict(
es.delete_by_query(index=self._index, body=self.to_dict(), **self._params)
)
class MultiSearch(Request):
    """
    Bundle several :class:`~elasticsearch_dsl.Search` objects into one
    ``msearch`` request executed in a single round trip.
    """
    def __init__(self, **kwargs):
        """Create an empty multi-search; individual searches are added via :meth:`add`."""
        super(MultiSearch, self).__init__(**kwargs)
        self._searches = []
    def __getitem__(self, key):
        # Index or slice straight into the list of collected searches.
        return self._searches[key]
    def __iter__(self):
        # Iterating the request iterates the collected searches.
        return iter(self._searches)
    def _clone(self):
        """Shallow-copy the request, including the list of searches."""
        clone = super(MultiSearch, self)._clone()
        clone._searches = list(self._searches)
        return clone
    def add(self, search):
        """
        Adds a new :class:`~elasticsearch_dsl.Search` object to the request::
            ms = MultiSearch(index='my-index')
            ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
            ms = ms.add(Search(doc_type=Blog))
        """
        clone = self._clone()
        clone._searches.append(search)
        return clone
    def to_dict(self):
        """Serialize into the alternating header/body list ``msearch`` expects."""
        payload = []
        for search in self._searches:
            header = {}
            if search._index:
                header["index"] = search._index
            header.update(search._params)
            payload.extend((header, search.to_dict()))
        return payload
    def execute(self, ignore_cache=False, raise_on_error=True):
        """
        Execute the multi search request and return a list of search results.
        """
        if ignore_cache or not hasattr(self, "_response"):
            es = get_connection(self._using)
            raw = es.msearch(
                index=self._index, body=self.to_dict(), **self._params
            )
            results = []
            for search, resp in zip(self._searches, raw["responses"]):
                if resp.get("error", False):
                    if raise_on_error:
                        raise TransportError("N/A", resp["error"]["type"], resp["error"])
                    # a failed search yields ``None`` in the result list
                    results.append(None)
                else:
                    results.append(Response(search, resp))
            self._response = results
        return self._response
| elasticsearch_dsl/search.py | 25,381 | Combine multiple :class:`~elasticsearch_dsl.Search` objects into a single
request.
Simple descriptor to enable setting of queries and filters as:
s = Search()
s.query = Q(...)
Simple proxy around DSL objects (queries) that can be called
(to add query/post_filter) and also allows attribute access which is proxied to
the wrapped query.
Support slicing the `Search` instance for pagination.
Slicing equates to the from/size parameters. E.g.::
s = Search().query(...)[0:25]
is equivalent to::
s = Search().query(...).extra(from_=0, size=25)
Search request to elasticsearch.
:arg using: `Elasticsearch` instance to use
:arg index: limit the search to index
:arg doc_type: only query this type.
All the parameters supplied (or omitted) at creation time can be later
overridden by methods (`using`, `index` and `doc_type` respectively).
Iterate over the hits.
Return a clone of the current search request. Performs a shallow copy
of all the underlying objects. Used internally by most state modifying
APIs.
Adds a new :class:`~elasticsearch_dsl.Search` object to the request::
ms = MultiSearch(index='my-index')
ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
ms = ms.add(Search(doc_type=Blog))
Return the number of hits matching the query and filters. Note that
only the actual number is returned.
delete() executes the query by delegating to delete_by_query()
Set the type to search through. You can supply a single value or
multiple. Values can be strings or subclasses of ``Document``.
You can also pass in any keyword arguments, mapping a doc_type to a
callback that should be used instead of the Hit class.
If no doc_type is supplied any information stored on the instance will
be erased.
Example:
s = Search().doc_type('product', 'store', User, custom=my_callback)
Execute the search and return an instance of ``Response`` wrapping all
the data.
:arg ignore_cache: if set to ``True``, consecutive calls will hit
ES, while cached result will be ignored. Defaults to `False`
Execute the multi search request and return a list of search results.
Add extra keys to the request body. Mostly here for backwards
compatibility.
Construct a new `Search` instance from a raw dict containing the search
body. Useful when migrating from raw dictionaries.
Example::
s = Search.from_dict({
"query": {
"bool": {
"must": [...]
}
},
"aggs": {...}
})
s = s.filter('term', published=True)
Request highlighting of some fields. All keyword arguments passed in will be
used as parameters for all the fields in the ``fields`` parameter. Example::
Search().highlight('title', 'body', fragment_size=50)
will produce the equivalent of::
{
"highlight": {
"fields": {
"body": {"fragment_size": 50},
"title": {"fragment_size": 50}
}
}
}
If you want to have different options for different fields
you can call ``highlight`` twice::
Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
which will produce::
{
"highlight": {
"fields": {
"body": {"fragment_size": 100},
"title": {"fragment_size": 50}
}
}
}
Update the global highlighting options used for this request. For
example::
s = Search()
s = s.highlight_options(order='score')
Set the index for the search. If called empty it will remove all information.
Example:
s = Search()
s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')
s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local')
Override the default wrapper used for the response.
Turn the search into a scan search and return a generator that will
iterate over all the documents matching the query.
Use ``params`` method to specify any additional arguments you wish to
pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
Define script fields to be calculated on hits. See
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html
for more details.
Example::
s = Search()
s = s.script_fields(times_two="doc['field'].value * 2")
s = s.script_fields(
times_three={
'script': {
'inline': "doc['field'].value * params.n",
'params': {'n': 3}
}
}
)
Add sorting information to the search request. If called without
arguments it will remove all sort requirements. Otherwise it will
replace them. Acceptable arguments are::
'some.field'
'-some.other.field'
{'different.field': {'any': 'dict'}}
so for example::
s = Search().sort(
'category',
'-title',
{"price" : {"order" : "asc", "mode" : "avg"}}
)
will sort by ``category``, ``title`` (in descending order) and
``price`` in ascending order using the ``avg`` mode.
The API returns a copy of the Search object and can thus be chained.
Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'includes' and/or
'excludes' the fields will be either included or excluded appropriately.
Calling this multiple times with the same named parameter will override the
previous values with the new ones.
Example::
s = Search()
s = s.source(includes=['obj1.*'], excludes=["*.description"])
s = Search()
s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
Add a suggestions request to the search.
:arg name: name of the suggestion
:arg text: text to suggest on
All keyword arguments will be added to the suggestions body. For example::
s = Search()
s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
Serialize the search into the dictionary that will be sent over as the
request's body.
:arg count: a flag to specify if we are interested in a body for count -
no aggregations, no pagination bounds etc.
All additional keyword arguments will be included into the dictionary.
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
Associate the search request with an elasticsearch client. A fresh copy
will be returned with current instance remaining unchanged.
:arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
an alias to look up in ``elasticsearch_dsl.connections``
Licensed to Elasticsearch B.V. under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Elasticsearch B.V. licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. only works on python 3.3+ we cannot use self._proxied since we just cloned self._search and need to access the new self on the clone always return search to be chainable .index() resets .doc_type() resets If negative slicing, abort. Elasticsearch won't get all results so we default to size: 10 if stop not given. This is an index lookup, equivalent to slicing by [n:n+1]. If negative index, abort. copy top-level bucket definitions count request doesn't care for sorting and other things TODO: failed shards detection | 8,326 | en | 0.706335 |
"""Test the code generating time series with the order totals.
Unless otherwise noted, each `time_step` is 60 minutes long implying
12 time steps per day (i.e., we use `LONG_TIME_STEP` by default).
"""
import datetime
import pandas as pd
import pytest
from tests import config as test_config
from urban_meal_delivery import config
@pytest.fixture
def good_predict_at():
    """A `predict_at` within `START`-`END` and ...
    ... a long enough history so that either `SHORT_TRAIN_HORIZON`
    or `LONG_TRAIN_HORIZON` works.
    """
    # Noon on the very last day of the test horizon maximizes the history.
    end = test_config.END
    return datetime.datetime(end.year, end.month, end.day, test_config.NOON, 0)
@pytest.fixture
def bad_predict_at():
    """A `predict_at` within `START`-`END` but ...
    ... not a long enough history so that both `SHORT_TRAIN_HORIZON`
    and `LONG_TRAIN_HORIZON` do not work.
    """
    # One day more than six weeks before `END` => history too short to train.
    day = test_config.END - datetime.timedelta(weeks=6, days=1)
    return datetime.datetime(day.year, day.month, day.day, test_config.NOON, 0)
class TestMakeHorizontalTimeSeries:
    """Test the `OrderHistory.make_horizontal_ts()` method."""
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_wrong_pixel(self, order_history, good_predict_at, train_horizon):
        """A `pixel_id` that is not in the `grid`."""
        with pytest.raises(LookupError):
            order_history.make_horizontal_ts(
                pixel_id=999_999,
                predict_at=good_predict_at,
                train_horizon=train_horizon,
            )
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_are_series(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The time series come as a `pd.Series`."""
        training_ts, _, actuals_ts = order_history.make_horizontal_ts(
            pixel_id=good_pixel_id,
            predict_at=good_predict_at,
            train_horizon=train_horizon,
        )
        for time_series in (training_ts, actuals_ts):
            assert isinstance(time_series, pd.Series)
            assert time_series.name == 'n_orders'
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_have_correct_length(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The length of a training time series must be a multiple of `7` ...
        ... whereas the time series with the actual order counts has only `1` value.
        """
        training_ts, _, actuals_ts = order_history.make_horizontal_ts(
            pixel_id=good_pixel_id,
            predict_at=good_predict_at,
            train_horizon=train_horizon,
        )
        # one observation per weekday per training week
        assert len(training_ts) == 7 * train_horizon
        assert len(actuals_ts) == 1
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_frequency_is_number_of_weekdays(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The `frequency` must be `7`."""
        _, frequency, _ = order_history.make_horizontal_ts(  # noqa:WPS434
            pixel_id=good_pixel_id,
            predict_at=good_predict_at,
            train_horizon=train_horizon,
        )
        assert frequency == 7
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_no_long_enough_history1(
        self, order_history, good_pixel_id, bad_predict_at, train_horizon,
    ):
        """If the `predict_at` day is too early in the `START`-`END` horizon ...
        ... the history of order totals is not long enough.
        """
        with pytest.raises(RuntimeError):
            order_history.make_horizontal_ts(
                pixel_id=good_pixel_id,
                predict_at=bad_predict_at,
                train_horizon=train_horizon,
            )
    def test_no_long_enough_history2(
        self, order_history, good_pixel_id, good_predict_at,
    ):
        """If the `train_horizon` is longer than the `START`-`END` horizon ...
        ... the history of order totals can never be long enough.
        """
        with pytest.raises(RuntimeError):
            order_history.make_horizontal_ts(
                pixel_id=good_pixel_id,
                predict_at=good_predict_at,
                train_horizon=999,
            )
class TestMakeVerticalTimeSeries:
    """Test the `OrderHistory.make_vertical_ts()` method."""
    @staticmethod
    def _time_steps_per_day():
        """Number of daily time steps implied by the default `LONG_TIME_STEP`."""
        return (
            60
            * (config.SERVICE_END - config.SERVICE_START)
            // test_config.LONG_TIME_STEP
        )
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_wrong_pixel(self, order_history, good_predict_at, train_horizon):
        """A `pixel_id` that is not in the `grid`."""
        with pytest.raises(LookupError):
            order_history.make_vertical_ts(
                pixel_id=999_999,
                predict_day=good_predict_at.date(),
                train_horizon=train_horizon,
            )
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_are_series(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The time series come as `pd.Series`."""
        training_ts, _, actuals_ts = order_history.make_vertical_ts(
            pixel_id=good_pixel_id,
            predict_day=good_predict_at.date(),
            train_horizon=train_horizon,
        )
        for time_series in (training_ts, actuals_ts):
            assert isinstance(time_series, pd.Series)
            assert time_series.name == 'n_orders'
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_have_correct_length(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The length of a training time series is the product of the ...
        ... weekly time steps (i.e., product of `7` and the number of daily time steps)
        and the `train_horizon` in weeks.
        The time series with the actual order counts always holds one observation
        per time step of a day.
        """
        training_ts, _, actuals_ts = order_history.make_vertical_ts(
            pixel_id=good_pixel_id,
            predict_day=good_predict_at.date(),
            train_horizon=train_horizon,
        )
        assert len(training_ts) == 7 * self._time_steps_per_day() * train_horizon
        assert len(actuals_ts) == self._time_steps_per_day()
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_frequency_is_number_number_of_weekly_time_steps(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The `frequency` is the number of weekly time steps."""
        _, frequency, _ = order_history.make_vertical_ts(  # noqa:WPS434
            pixel_id=good_pixel_id,
            predict_day=good_predict_at.date(),
            train_horizon=train_horizon,
        )
        assert frequency == 7 * self._time_steps_per_day()
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_no_long_enough_history1(
        self, order_history, good_pixel_id, bad_predict_at, train_horizon,
    ):
        """If the `predict_at` day is too early in the `START`-`END` horizon ...
        ... the history of order totals is not long enough.
        """
        with pytest.raises(RuntimeError):
            order_history.make_vertical_ts(
                pixel_id=good_pixel_id,
                predict_day=bad_predict_at.date(),
                train_horizon=train_horizon,
            )
    def test_no_long_enough_history2(
        self, order_history, good_pixel_id, good_predict_at,
    ):
        """If the `train_horizon` is longer than the `START`-`END` horizon ...
        ... the history of order totals can never be long enough.
        """
        with pytest.raises(RuntimeError):
            order_history.make_vertical_ts(
                pixel_id=good_pixel_id,
                predict_day=good_predict_at.date(),
                train_horizon=999,
            )
class TestMakeRealTimeTimeSeries:
    """Test the `OrderHistory.make_realtime_ts()` method."""
    @staticmethod
    def _time_steps_per_day():
        """Number of daily time steps implied by the default `LONG_TIME_STEP`."""
        return (
            60
            * (config.SERVICE_END - config.SERVICE_START)
            // test_config.LONG_TIME_STEP
        )
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_wrong_pixel(self, order_history, good_predict_at, train_horizon):
        """A `pixel_id` that is not in the `grid`."""
        with pytest.raises(LookupError):
            order_history.make_realtime_ts(
                pixel_id=999_999,
                predict_at=good_predict_at,
                train_horizon=train_horizon,
            )
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_are_series(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The time series come as `pd.Series`."""
        training_ts, _, actuals_ts = order_history.make_realtime_ts(
            pixel_id=good_pixel_id,
            predict_at=good_predict_at,
            train_horizon=train_horizon,
        )
        for time_series in (training_ts, actuals_ts):
            assert isinstance(time_series, pd.Series)
            assert time_series.name == 'n_orders'
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_have_correct_length1(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The length of a training time series is the product of the ...
        ... weekly time steps (i.e., product of `7` and the number of daily time steps)
        and the `train_horizon` in weeks; however, this assertion only holds if
        we predict the first `time_step` of the day.
        The time series with the actual order counts always holds `1` value.
        """
        # Predict the very first time step of the day.
        predict_at = datetime.datetime(
            good_predict_at.year,
            good_predict_at.month,
            good_predict_at.day,
            config.SERVICE_START,
            0,
        )
        training_ts, _, actuals_ts = order_history.make_realtime_ts(
            pixel_id=good_pixel_id, predict_at=predict_at, train_horizon=train_horizon,
        )
        assert len(training_ts) == 7 * self._time_steps_per_day() * train_horizon
        assert len(actuals_ts) == 1
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_time_series_have_correct_length2(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The length of a training time series is the product of the ...
        ... weekly time steps (i.e., product of `7` and the number of daily time steps)
        and the `train_horizon` in weeks; however, this assertion only holds if
        we predict the first `time_step` of the day. Predicting any other `time_step`
        means that the training time series becomes longer by the number of time steps
        before the one being predicted.
        The time series with the actual order counts always holds `1` value.
        """
        assert good_predict_at.hour == test_config.NOON
        training_ts, _, actuals_ts = order_history.make_realtime_ts(
            pixel_id=good_pixel_id,
            predict_at=good_predict_at,
            train_horizon=train_horizon,
        )
        steps_before_noon = (
            60 * (test_config.NOON - config.SERVICE_START) // test_config.LONG_TIME_STEP
        )
        assert (
            len(training_ts)
            == 7 * self._time_steps_per_day() * train_horizon + steps_before_noon
        )
        assert len(actuals_ts) == 1
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_frequency_is_number_number_of_weekly_time_steps(
        self, order_history, good_pixel_id, good_predict_at, train_horizon,
    ):
        """The `frequency` is the number of weekly time steps."""
        _, frequency, _ = order_history.make_realtime_ts(  # noqa:WPS434
            pixel_id=good_pixel_id,
            predict_at=good_predict_at,
            train_horizon=train_horizon,
        )
        assert frequency == 7 * self._time_steps_per_day()
    @pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_no_long_enough_history1(
        self, order_history, good_pixel_id, bad_predict_at, train_horizon,
    ):
        """If the `predict_at` day is too early in the `START`-`END` horizon ...
        ... the history of order totals is not long enough.
        """
        with pytest.raises(RuntimeError):
            order_history.make_realtime_ts(
                pixel_id=good_pixel_id,
                predict_at=bad_predict_at,
                train_horizon=train_horizon,
            )
    def test_no_long_enough_history2(
        self, order_history, good_pixel_id, good_predict_at,
    ):
        """If the `train_horizon` is longer than the `START`-`END` horizon ...
        ... the history of order totals can never be long enough.
        """
        with pytest.raises(RuntimeError):
            order_history.make_realtime_ts(
                pixel_id=good_pixel_id,
                predict_at=good_predict_at,
                train_horizon=999,
            )
| tests/forecasts/timify/test_make_time_series.py | 14,257 | Test the `OrderHistory.make_horizontal_ts()` method.
Test the `OrderHistory.make_realtime_ts()` method.
Test the `OrderHistory.make_vertical_ts()` method.
A `predict_at` within `START`-`END` but ...
... not a long enough history so that both `SHORT_TRAIN_HORIZON`
and `LONG_TRAIN_HORIZON` do not work.
A `predict_at` within `START`-`END` and ...
... a long enough history so that either `SHORT_TRAIN_HORIZON`
or `LONG_TRAIN_HORIZON` works.
The `frequency` is the number of weekly time steps.
The `frequency` is the number of weekly time steps.
The `frequency` must be `7`.
If the `predict_at` day is too early in the `START`-`END` horizon ...
... the history of order totals is not long enough.
If the `predict_at` day is too early in the `START`-`END` horizon ...
... the history of order totals is not long enough.
If the `predict_at` day is too early in the `START`-`END` horizon ...
... the history of order totals is not long enough.
If the `train_horizon` is longer than the `START`-`END` horizon ...
... the history of order totals can never be long enough.
If the `train_horizon` is longer than the `START`-`END` horizon ...
... the history of order totals can never be long enough.
If the `train_horizon` is longer than the `START`-`END` horizon ...
... the history of order totals can never be long enough.
The time series come as a `pd.Series`.
The time series come as `pd.Series`.
The time series come as `pd.Series`.
The length of a training time series must be a multiple of `7` ...
... whereas the time series with the actual order counts has only `1` value.
The length of a training time series is the product of the ...
... weekly time steps (i.e., product of `7` and the number of daily time steps)
and the `train_horizon` in weeks.
The time series with the actual order counts always holds one observation
per time step of a day.
The length of a training time series is the product of the ...
... weekly time steps (i.e., product of `7` and the number of daily time steps)
and the `train_horizon` in weeks; however, this assertion only holds if
we predict the first `time_step` of the day.
The time series with the actual order counts always holds `1` value.
The length of a training time series is the product of the ...
... weekly time steps (i.e., product of `7` and the number of daily time steps)
and the `train_horizon` in weeks; however, this assertion only holds if
we predict the first `time_step` of the day. Predicting any other `time_step`
means that the training time series becomes longer by the number of time steps
before the one being predicted.
The time series with the actual order counts always holds `1` value.
A `pixel_id` that is not in the `grid`.
A `pixel_id` that is not in the `grid`.
A `pixel_id` that is not in the `grid`.
Test the code generating time series with the order totals.
Unless otherwise noted, each `time_step` is 60 minutes long implying
12 time steps per day (i.e., we use `LONG_TIME_STEP` by default).
noqa:WPS434 noqa:WPS434 noqa:WPS434 | 3,019 | en | 0.870829 |
"""
This file is part of the FJournal Project.
Copyright © 2019-2020, Daniele Penazzo. All Rights Reserved.
The use of this code is governed by the MIT license attached.
See the LICENSE file for the full license.
Created on: 2020-07-10
Author: Penaz
"""
from tkinter import ttk
import tkinter as tk
from models import Meal
class AddMealPopup(ttk.Frame):
    """
    Popup frame that lets the user register a new meal
    """
    def __init__(self, master=None, session=None):
        """
        Builds the frame, stores the DB session and creates the widgets
        """
        super().__init__(master)
        self.master = master
        self.grid(row=0, column=0)
        self.session = session
        self.mealname = tk.StringVar()
        self.create_widgets()
    def create_widgets(self):
        """
        Lays out the label, the name entry and the confirm button
        """
        self.meallbl = ttk.Label(self, text="Meal Name")
        self.meallbl.grid(row=0, column=0)
        self.mealinput = ttk.Entry(self, textvariable=self.mealname)
        self.mealinput.grid(row=0, column=1)
        self.addbtn = ttk.Button(self, text="Confirm", command=self.add_meal)
        self.addbtn.grid(row=1, column=0, columnspan=2)
    def add_meal(self):
        """
        Persists a Meal with the entered name and closes the popup
        """
        new_meal = Meal(name=self.mealname.get())
        self.session.add(new_meal)
        self.session.commit()
        self.master.destroy()
| gui/addmealpopup.py | 1,448 | Defines a popup for adding meals
Constructor of the class
Opens the Add Meal popup
Creates the widgets for the popup
This file is part of the FJournal Project.
Copyright © 2019-2020, Daniele Penazzo. All Rights Reserved.
The use of this code is governed by the MIT license attached.
See the LICENSE file for the full license.
Created on: 2020-07-10
Author: Penaz | 364 | en | 0.88297 |
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
#
# Adafruit PCF8523 RTC Library documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 11 21:37:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used for this build: API autodoc, todo directives,
# and cross-project linking via intersphinx.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.intersphinx"]
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["adafruit_bus_device", "adafruit_register"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit PCF8523 RTC Library"
copyright = "2016, Philip Moyer"
author = "Philip Moyer"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx 5+ warns on `language = None`; consider "en" when
# upgrading — confirm against the Sphinx version pinned for this project.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
# Paths/patterns skipped when collecting source files; also excluded from
# html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Read the Docs injects its own theme; only pick one for local builds.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
        # Fall back to the builtin theme when sphinx_rtd_theme is not
        # installed. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit — PEP 8 / flake8 E722.)
        html_theme = "default"
        html_theme_path = ["."]
else:
    html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Adafruit PCF8523 RTC Library v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# Favicon served with the HTML docs (16x16 or 32x32 .ico under _static/).
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# htmlhelp_basename = 'AdafruitsPCF8523RTCLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX builder tweaks; every option is left at its default (commented out).
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "AdafruitPCF8523RTCLibrary.tex",
        "Adafruit PCF8523 RTC Library Documentation",
        "Philip Moyer",
        "manual",
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        # NOTE(review): this basename looks garbled by templating
        # ("...Library23rtclibrary") — confirm the intended man page name.
        "AdafruitPCF8523RTCLibrary23rtclibrary",
        "Adafruit PCF8523 RTC Library Documentation",
        [author],
        1,
    )
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "AdafruitPCF8523RTCLibrary",
        "Adafruit PCF8523 RTC Library Documentation",
        author,
        "AdafruitPCF8523RTCLibrary",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Cross-reference targets resolved against external documentation sets.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "CircuitPython": ("https://docs.circuitpython.org/en/latest/", None),
}
| docs/conf.py | 11,218 | -*- coding: utf-8 -*- SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries SPDX-License-Identifier: MIT Adafruit PCF8523 RTC Library documentation build configuration file, created by sphinx-quickstart on Fri Nov 11 21:37:36 2016. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Uncomment the below if you use native CircuitPython modules such as digitalio, micropython and busio. List the modules you use. Without it, the autodoc module docs will fail to generate with a warning. autodoc_mock_imports = ["adafruit_bus_device", "adafruit_register"] Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = '.rst' The encoding of source files. source_encoding = 'utf-8-sig' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. 
This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. There are two options for replacing |today|: either, you set today to some non-false value, then it is used: today = '' Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This patterns also effect to html_static_path and html_extra_path The reST default role (used for this markup: `text`) to use for all documents. If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::). add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default. show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting. modindex_common_prefix = [] If true, keep warnings as "system message" paragraphs in the built documents. keep_warnings = False If true, `todo` and `todoList` produce output, else they produce nothing. If this is True, todo emits a warning for each TODO entries. The default is False. -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. only import and set the theme if we're building docs locally Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom themes here, relative to this directory. html_theme_path = [] The name for this set of Sphinx documents. "<project> v<release> documentation" by default. 
html_title = u'Adafruit PCF8523 RTC Library v1.0' A shorter title for the navigation bar. Default is the same as html_title. html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar. html_logo = None The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory. These files are copied directly to the root of the documentation. html_extra_path = [] If not None, a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format. The empty string is equivalent to '%b %d, %Y'. html_last_updated_fmt = None If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities. html_use_smartypants = True Custom sidebar templates, maps document names to template names. html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names. html_additional_pages = {} If false, no module index is generated. html_domain_indices = True If false, no index is generated. html_use_index = True If true, the index is split into individual pages for each letter. html_split_index = False If true, links to the reST sources are added to the pages. html_show_sourcelink = True If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = True If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. 
The value of this option must be the base URL from which the finished HTML is served. html_use_opensearch = '' This is the file name suffix for HTML files (e.g. ".xhtml"). html_file_suffix = None Language to be used for generating the HTML full-text search index. Sphinx supports the following languages: 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' html_search_language = 'en' A dictionary with options for the search language support, empty by default. 'ja' uses this config value. 'zh' user can custom change `jieba` dictionary path. html_search_options = {'type': 'default'} The name of a javascript file (relative to the configuration directory) that implements a search results scorer. If empty, the default will be used. html_search_scorer = 'scorer.js' Output file base name for HTML help builder. htmlhelp_basename = 'AdafruitsPCF8523RTCLibrarydoc' -- Options for LaTeX output --------------------------------------------- The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). The name of an image file (relative to this directory) to place at the top of the title page. latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters. latex_use_parts = False If true, show page references after internal links. latex_show_pagerefs = False If true, show URL addresses after external links. latex_show_urls = False Documents to append as an appendix to all manuals. latex_appendices = [] It false, will not define \strong, \code, itleref, \crossref ... but only \sphinxstrong, ..., \sphinxtitleref, ... 
To help avoid clash with user added packages. latex_keep_old_macro_names = True If false, no module index is generated. latex_domain_indices = True -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). If true, show URL addresses after external links. man_show_urls = False -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) Documents to append as an appendix to all manuals. texinfo_appendices = [] If false, no module index is generated. texinfo_domain_indices = True How to display URL addresses: 'footnote', 'no', or 'inline'. texinfo_show_urls = 'footnote' If true, do not generate a @detailmenu in the "Top" node's menu. texinfo_no_detailmenu = False | 8,841 | en | 0.662884 |
import unittest
import numpy
import pytest
import cupy
import cupy.core._accelerator as _acc
from cupy.core import _cub_reduction
from cupy import testing
@testing.gpu
class TestSearch(unittest.TestCase):
    """Tests for ``argmax``/``argmin`` as methods and module functions.

    Each case is run against both NumPy and CuPy via the
    ``numpy_cupy_allclose`` decorator, which supplies ``xp``.
    """

    # --- argmax -----------------------------------------------------------
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return a.argmax()
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmax_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return xp.argmax(a)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_argmax_nan(self, xp, dtype):
        a = xp.array([float('nan'), -1, 1], dtype)
        return a.argmax()
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return a.argmax(axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmax_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return xp.argmax(a, axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis0(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmax(axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis1(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmax(axis=1)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis2(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmax(axis=2)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_tie(self, xp, dtype):
        # Two maxima; both libraries must report the first occurrence.
        a = xp.array([0, 5, 2, 3, 4, 5], dtype)
        return a.argmax()
    @testing.for_all_dtypes(no_complex=True)
    def test_argmax_zero_size(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmax()
    @testing.for_all_dtypes(no_complex=True)
    def test_argmax_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmax(axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_zero_size_axis1(self, xp, dtype):
        a = testing.shaped_random((0, 1), xp, dtype)
        return a.argmax(axis=1)
    # --- argmin -----------------------------------------------------------
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return a.argmin()
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_argmin_nan(self, xp, dtype):
        a = xp.array([float('nan'), -1, 1], dtype)
        return a.argmin()
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmin_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return xp.argmin(a)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return a.argmin(axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmin_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return xp.argmin(a, axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis0(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmin(axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis1(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmin(axis=1)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis2(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmin(axis=2)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_tie(self, xp, dtype):
        # Two minima; both libraries must report the first occurrence.
        a = xp.array([0, 1, 2, 3, 0, 5], dtype)
        return a.argmin()
    @testing.for_all_dtypes(no_complex=True)
    def test_argmin_zero_size(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                # No `return` here: the call is expected to raise, and the
                # argmax twin of this test has none either (consistency fix).
                a.argmin()
    @testing.for_all_dtypes(no_complex=True)
    def test_argmin_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmin(axis=0)
    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_zero_size_axis1(self, xp, dtype):
        a = testing.shaped_random((0, 1), xp, dtype)
        return a.argmin(axis=1)
# This class compares CUB results against NumPy's
# TODO(leofang): test axis after support is added
@testing.parameterize(*testing.product({
    'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
    'order_and_axis': (('C', -1), ('C', None), ('F', 0), ('F', None)),
    'backend': ('device', 'block'),
}))
@testing.gpu
@unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled')
class TestCubReduction(unittest.TestCase):
    def setUp(self):
        # Remember the current accelerator configuration so tearDown can
        # restore it, then enable exactly one CUB backend for this case:
        # 'device' -> routine accelerators, 'block' -> reduction accelerators.
        self.order, self.axis = self.order_and_axis
        self.old_routine_accelerators = _acc.get_routine_accelerators()
        self.old_reduction_accelerators = _acc.get_reduction_accelerators()
        if self.backend == 'device':
            # The device-wide CUB path only handles full reductions.
            if self.axis is not None:
                raise unittest.SkipTest('does not support')
            _acc.set_routine_accelerators(['cub'])
            _acc.set_reduction_accelerators([])
        elif self.backend == 'block':
            _acc.set_routine_accelerators([])
            _acc.set_reduction_accelerators(['cub'])
    def tearDown(self):
        # Restore whatever accelerator settings were active before setUp.
        _acc.set_routine_accelerators(self.old_routine_accelerators)
        _acc.set_reduction_accelerators(self.old_reduction_accelerators)
    @testing.for_dtypes('bhilBHILefdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
    def test_cub_argmin(self, xp, dtype):
        # First assert (via mocking) that the CUB code path is actually
        # taken, then compare the real result against NumPy.
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order == 'C':
            a = xp.ascontiguousarray(a)
        else:
            a = xp.asfortranarray(a)
        if xp is numpy:
            return a.argmin(axis=self.axis)
        # xp is cupy, first ensure we really use CUB
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        if self.backend == 'device':
            func_name = 'cupy.core._routines_statistics.cub.'
            func_name += 'device_reduce'
            with testing.AssertFunctionIsCalled(func_name, return_value=ret):
                a.argmin(axis=self.axis)
        elif self.backend == 'block':
            # this is the only function we can mock; the rest is cdef'd
            func_name = 'cupy.core._cub_reduction.'
            func_name += '_SimpleCubReductionKernel_get_cached_function'
            func = _cub_reduction._SimpleCubReductionKernel_get_cached_function
            if self.axis is not None and len(self.shape) > 1:
                times_called = 1  # one pass
            else:
                times_called = 2  # two passes
            with testing.AssertFunctionIsCalled(
                    func_name, wraps=func, times_called=times_called):
                a.argmin(axis=self.axis)
        # ...then perform the actual computation
        return a.argmin(axis=self.axis)
    @testing.for_dtypes('bhilBHILefdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
    def test_cub_argmax(self, xp, dtype):
        # Mirror of test_cub_argmin for argmax.
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order == 'C':
            a = xp.ascontiguousarray(a)
        else:
            a = xp.asfortranarray(a)
        if xp is numpy:
            return a.argmax(axis=self.axis)
        # xp is cupy, first ensure we really use CUB
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        if self.backend == 'device':
            func_name = 'cupy.core._routines_statistics.cub.'
            func_name += 'device_reduce'
            with testing.AssertFunctionIsCalled(func_name, return_value=ret):
                a.argmax(axis=self.axis)
        elif self.backend == 'block':
            # this is the only function we can mock; the rest is cdef'd
            func_name = 'cupy.core._cub_reduction.'
            func_name += '_SimpleCubReductionKernel_get_cached_function'
            func = _cub_reduction._SimpleCubReductionKernel_get_cached_function
            if self.axis is not None and len(self.shape) > 1:
                times_called = 1  # one pass
            else:
                times_called = 2  # two passes
            with testing.AssertFunctionIsCalled(
                    func_name, wraps=func, times_called=times_called):
                a.argmax(axis=self.axis)
        # ...then perform the actual computation
        return a.argmax(axis=self.axis)
@testing.gpu
@testing.parameterize(*testing.product({
    'func': ['argmin', 'argmax'],
    'is_module': [True, False],
    'shape': [(3, 4), ()],
}))
class TestArgMinMaxDtype(unittest.TestCase):
    """Checks that argmin/argmax honour an explicit integer ``dtype``."""

    @testing.for_dtypes(
        dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64],
        name='result_dtype')
    @testing.for_all_dtypes(name='in_dtype')
    def test_argminmax_dtype(self, in_dtype, result_dtype):
        arr = testing.shaped_random(self.shape, cupy, in_dtype)
        if self.is_module:
            result = getattr(cupy, self.func)(arr, dtype=result_dtype)
        else:
            result = getattr(arr, self.func)(dtype=result_dtype)
        assert result.shape == ()
        assert result.dtype == result_dtype
@testing.parameterize(
    {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
    {'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
    {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)},
    {'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)},
)
@testing.gpu
class TestWhereTwoArrays(unittest.TestCase):
    """``where(cond, x, y)`` with broadcasting shape combinations."""

    @testing.for_all_dtypes_combination(
        names=['cond_type', 'x_type', 'y_type'])
    @testing.numpy_cupy_allclose()
    def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
        mask = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # shaped_random almost never yields zeros, so multiply by a
        # boolean mask to obtain a genuinely sparse condition array.
        cond = testing.shaped_random(self.cond_shape, xp, cond_type) * mask
        x = testing.shaped_random(self.x_shape, xp, x_type, seed=0)
        y = testing.shaped_random(self.y_shape, xp, y_type, seed=1)
        return xp.where(cond, x, y)
@testing.parameterize(
    {'cond_shape': (2, 3, 4)},
    {'cond_shape': (4,)},
    {'cond_shape': (2, 3, 4)},
    {'cond_shape': (3, 4)},
)
@testing.gpu
class TestWhereCond(unittest.TestCase):
    """Single-argument ``where(cond)`` over assorted shapes."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_where_cond(self, xp, dtype):
        mask = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # Sparsify the condition so some entries are genuinely zero.
        cond = testing.shaped_random(self.cond_shape, xp, dtype) * mask
        return xp.where(cond)
@testing.gpu
class TestWhereError(unittest.TestCase):
    """``where`` with two arguments (cond, x but no y) must raise."""

    def test_one_argument(self):
        for xp in (numpy, cupy):
            condition = testing.shaped_random((3, 4), xp, dtype=xp.bool_)
            values = testing.shaped_random((2, 3, 4), xp, xp.int32)
            with pytest.raises(ValueError):
                xp.where(condition, values)
@testing.parameterize(
    {'array': numpy.random.randint(0, 2, (20,))},
    {'array': numpy.random.randn(3, 2, 4)},
    {'array': numpy.empty((0,))},
    {'array': numpy.empty((0, 2))},
    {'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestNonzero(unittest.TestCase):
    """``nonzero`` agrees with NumPy, including empty inputs."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_nonzero(self, xp, dtype):
        arr = xp.array(self.array, dtype=dtype)
        return xp.nonzero(arr)
@testing.parameterize(
    {'array': numpy.array(0)},
    {'array': numpy.array(1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.17.0')
class TestNonzeroZeroDimension(unittest.TestCase):
    """``nonzero`` on a 0-d array is deprecated and must warn."""

    @testing.for_all_dtypes()
    def test_nonzero(self, dtype):
        for xp in (numpy, cupy):
            arr = xp.array(self.array, dtype=dtype)
            with pytest.raises(DeprecationWarning):
                xp.nonzero(arr)
@testing.parameterize(
    {'array': numpy.random.randint(0, 2, (20,))},
    {'array': numpy.random.randn(3, 2, 4)},
    {'array': numpy.array(0)},
    {'array': numpy.array(1)},
    {'array': numpy.empty((0,))},
    {'array': numpy.empty((0, 2))},
    {'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestFlatNonzero(unittest.TestCase):

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_flatnonzero(self, xp, dtype):
        """flatnonzero() must agree between NumPy and CuPy, 0-d included."""
        data = xp.array(self.array, dtype=dtype)
        return xp.flatnonzero(data)
@testing.parameterize(
    {'array': numpy.random.randint(0, 2, (20,))},
    {'array': numpy.random.randn(3, 2, 4)},
    {'array': numpy.empty((0,))},
    {'array': numpy.empty((0, 2))},
    {'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestArgwhere(unittest.TestCase):

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_argwhere(self, xp, dtype):
        """argwhere() must return identical coordinate arrays."""
        data = xp.array(self.array, dtype=dtype)
        return xp.argwhere(data)
@testing.parameterize(
    {'array': cupy.array(1)},
)
@testing.gpu
class TestArgwhereZeroDimension(unittest.TestCase):
    def test_argwhere(self):
        # NOTE(review): despite the class/method name this calls
        # cupy.nonzero, not cupy.argwhere — confirm whether argwhere was
        # intended, or whether the 0-d DeprecationWarning emitted by
        # nonzero is exactly what is being asserted here.
        with testing.assert_warns(DeprecationWarning):
            return cupy.nonzero(self.array)
@testing.gpu
class TestNanArgMin(unittest.TestCase):
    """nanargmin() parity tests: NaN placement, axes, ties and empty input.

    ``accept_error=ValueError`` lets both libraries raise the same error
    (e.g. when NaN cannot be represented in the requested dtype).
    """

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_all(self, xp, dtype):
        arr = testing.shaped_random((2, 3), xp, dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan(self, xp, dtype):
        arr = xp.array([float('nan'), -1, 1], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan2(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan3(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan4(self, xp, dtype):
        arr = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
                       dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan5(self, xp, dtype):
        arr = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
                       dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis_large(self, xp, dtype):
        arr = testing.shaped_random((3, 1000), xp, dtype)
        return xp.nanargmin(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis0(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmin(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis1(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmin(arr, axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis2(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmin(arr, axis=2)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_tie(self, xp, dtype):
        # Tied minima: both libraries must pick the same index.
        arr = xp.array([0, 5, 2, 3, 4, 5], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmin_zero_size(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmin_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                return xp.nanargmin(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_zero_size_axis1(self, xp, dtype):
        # Reducing over the non-empty axis of an empty array is legal.
        arr = testing.shaped_random((0, 1), xp, dtype)
        return xp.nanargmin(arr, axis=1)
@testing.gpu
class TestNanArgMax(unittest.TestCase):
    """nanargmax() parity tests, mirroring the nanargmin suite above.

    ``accept_error=ValueError`` lets both libraries raise the same error
    (e.g. when NaN cannot be represented in the requested dtype).
    """

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_all(self, xp, dtype):
        arr = testing.shaped_random((2, 3), xp, dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan(self, xp, dtype):
        arr = xp.array([float('nan'), -1, 1], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan2(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan3(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan4(self, xp, dtype):
        arr = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
                       dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan5(self, xp, dtype):
        arr = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
                       dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis_large(self, xp, dtype):
        arr = testing.shaped_random((3, 1000), xp, dtype)
        return xp.nanargmax(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis0(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmax(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis1(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmax(arr, axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis2(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmax(arr, axis=2)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_tie(self, xp, dtype):
        # Tied maxima: both libraries must pick the same index.
        arr = xp.array([0, 5, 2, 3, 4, 5], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmax_zero_size(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmax_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                return xp.nanargmax(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_zero_size_axis1(self, xp, dtype):
        # Reducing over the non-empty axis of an empty array is legal.
        arr = testing.shaped_random((0, 1), xp, dtype)
        return xp.nanargmax(arr, axis=1)
@testing.gpu
@testing.parameterize(*testing.product(
    {'bins': [
        [],
        [0, 1, 2, 4, 10],
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        [0.0, 1.0, 2.5, 4.0, 10.0],
        [-1.0, 1.0, 2.5, 4.0, 20.0],
        [1.5, 2.5, 4.0, 6.0],
        [float('-inf'), 1.5, 2.5, 4.0, 6.0],
        [1.5, 2.5, 4.0, 6.0, float('inf')],
        [float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')],
        [0.0, 1.0, 1.0, 4.0, 4.0, 10.0],
        [0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0],
    ],
    'side': ['left', 'right'],
    'shape': [(), (10,), (6, 3, 3)]})
)
class TestSearchSorted(unittest.TestCase):

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_searchsorted(self, xp, dtype):
        """searchsorted() must agree for every bins/side/shape combination."""
        needles = testing.shaped_arange(self.shape, xp, dtype)
        sorted_bins = xp.array(self.bins)
        # Wrapped in a 1-tuple so the comparison decorator handles the
        # 0-d result shape uniformly.
        return xp.searchsorted(sorted_bins, needles, side=self.side),
@testing.gpu
@testing.parameterize(
    {'side': 'left'},
    {'side': 'right'})
class TestSearchSortedNanInf(unittest.TestCase):
    """searchsorted() parity when NaN/inf appear in the needles or bins."""

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_nanbins(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        bin_edges = xp.array([0, 1, 2, 4, 10, float('nan')])
        return xp.searchsorted(bin_edges, needles, side=self.side),

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_nan(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        needles[5] = float('nan')
        bin_edges = xp.array([0, 1, 2, 4, 10])
        return xp.searchsorted(bin_edges, needles, side=self.side),

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_nan_last(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        needles[-1] = float('nan')
        bin_edges = xp.array([0, 1, 2, 4, float('nan')])
        return xp.searchsorted(bin_edges, needles, side=self.side),

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_nan_last_repeat(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        needles[-1] = float('nan')
        bin_edges = xp.array([0, 1, 2, float('nan'), float('nan')])
        return xp.searchsorted(bin_edges, needles, side=self.side),

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_all_nans(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        needles[-1] = float('nan')
        bin_edges = xp.array([float('nan'), float('nan'), float('nan'),
                              float('nan'), float('nan')])
        return xp.searchsorted(bin_edges, needles, side=self.side),

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_inf(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        needles[5] = float('inf')
        bin_edges = xp.array([0, 1, 2, 4, 10])
        return xp.searchsorted(bin_edges, needles, side=self.side),

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_minf(self, xp):
        needles = testing.shaped_arange((10,), xp, xp.float64)
        needles[5] = float('-inf')
        bin_edges = xp.array([0, 1, 2, 4, 10])
        return xp.searchsorted(bin_edges, needles, side=self.side),
@testing.gpu
class TestSearchSortedInvalid(unittest.TestCase):
    # Unordered 1-d bins cannot be tested: NumPy's behaviour for unsorted
    # searchsorted input is undefined, so there is no reference result.

    def test_searchsorted_ndbins(self):
        """Multi-dimensional bins must be rejected with ValueError."""
        for xp in (numpy, cupy):
            needles = testing.shaped_arange((10,), xp, xp.float64)
            nd_bins = xp.array([[10, 4], [2, 1], [7, 8]])
            with pytest.raises(ValueError):
                xp.searchsorted(nd_bins, needles)
@testing.gpu
class TestSearchSortedWithSorter(unittest.TestCase):

    @testing.numpy_cupy_array_equal()
    def test_sorter(self, xp):
        """A valid sorter permutation must give identical results."""
        needles = testing.shaped_arange((12,), xp, xp.float64)
        unsorted_bins = xp.array([10, 4, 2, 1, 8])
        order = xp.array([3, 2, 1, 4, 0])
        return xp.searchsorted(unsorted_bins, needles, sorter=order),

    def test_invalid_sorter(self):
        """A sorter shorter than the bins must raise ValueError."""
        for xp in (numpy, cupy):
            needles = testing.shaped_arange((12,), xp, xp.float64)
            unsorted_bins = xp.array([10, 4, 2, 1, 8])
            bad_order = xp.array([0])
            with pytest.raises(ValueError):
                xp.searchsorted(unsorted_bins, needles, sorter=bad_order)

    def test_nonint_sorter(self):
        """A non-integer sorter dtype must raise TypeError."""
        for xp in (numpy, cupy):
            needles = testing.shaped_arange((12,), xp, xp.float64)
            unsorted_bins = xp.array([10, 4, 2, 1, 8])
            float_order = xp.array([], dtype=xp.float64)
            with pytest.raises(TypeError):
                xp.searchsorted(unsorted_bins, needles, sorter=float_order)
| tests/cupy_tests/sorting_tests/test_search.py | 25,947 | This class compares CUB results against NumPy's TODO(leofang): test axis after support is added xp is cupy, first ensure we really use CUB Cython checks return type, need to fool it this is the only function we can mock; the rest is cdef'd one pass two passes ...then perform the actual computation xp is cupy, first ensure we really use CUB Cython checks return type, need to fool it this is the only function we can mock; the rest is cdef'd one pass two passes ...then perform the actual computation Almost all values of a matrix `shaped_random` makes are not zero. To make a sparse matrix, we need multiply `m`. Cant test unordered bins due to numpy undefined behavior for searchsorted | 688 | en | 0.878948 |
import mock
import json
from collections import OrderedDict
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from apps.common.tests import GetResponseMixin
from apps.issue.models import Issue, IssueStatus, IssueExtValue
from apps.user_group.models import UserGroupType
from gated_launch_backend.settings_test import JIRA_API_URL, JIRA_ZC_USER
class BusinessModulesRESTTestCase(APITestCase, GetResponseMixin):
    fixtures = [
        "apps/auth/fixtures/tests/departments.json",
        "apps/auth/fixtures/tests/users.json",
        "apps/issue/fixtures/tests/business_modules.json"
    ]

    def test_list_business_modules(self):
        """Listing business modules returns both fixture rows, child first."""
        self.client.force_authenticate(user=get_user_model().objects.get(username='normal_user'))
        response = self.client.get(reverse('businessmodules-list'), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
        child = OrderedDict([('id', 2), ('name', 'parking car'),
                             ('level', 1), ('parent', 'parking'),
                             ('parentId', 1), ('disabled', True)])
        root = OrderedDict([('id', 1), ('name', 'parking'),
                            ('level', 0), ('parent', None),
                            ('parentId', None), ('disabled', False)])
        expected = OrderedDict([
            ('status', 200), ('msg', '成功'),
            ('data', OrderedDict([('total', 2), ('next', None),
                                  ('previous', None),
                                  ('results', [child, root])]))])
        self.assertEqual(response.data, expected)
class PhoneBrandsRESTTestCase(APITestCase, GetResponseMixin):
    fixtures = [
        "apps/auth/fixtures/tests/departments.json",
        "apps/auth/fixtures/tests/users.json",
        "apps/issue/fixtures/tests/phone_brands.json"
    ]

    def test_list_phone_brands(self):
        """Listing phone brands returns the single fixture entry.

        Renamed from the copy-pasted ``test_list_business_modules``, which
        misdescribed what this case exercises; test discovery still picks
        it up via the ``test_`` prefix.
        """
        self.client.force_authenticate(user=get_user_model().objects.get(username='normal_user'))
        url = reverse('phonebrands-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
        self.assertEqual(response.data,
                         OrderedDict([('status', 200), ('msg', '成功'),
                                      ('data', OrderedDict([('total', 1), ('next', None), ('previous', None),
                                                            ('results', [OrderedDict([('id', 1),
                                                                                      ('name', 'Huawei P8')])])]))]))
class RegionsRESTTestCase(APITestCase, GetResponseMixin):
    fixtures = [
        "apps/auth/fixtures/tests/departments.json",
        "apps/auth/fixtures/tests/users.json",
        "apps/issue/fixtures/tests/regions.json"
    ]

    def test_list_regions(self):
        """Listing regions returns the single fixture entry.

        Renamed from the copy-pasted ``test_list_business_modules``, which
        misdescribed what this case exercises; test discovery still picks
        it up via the ``test_`` prefix.
        """
        self.client.force_authenticate(user=get_user_model().objects.get(username='normal_user'))
        url = reverse('regions-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
        self.assertEqual(response.data,
                         OrderedDict([('status', 200), ('msg', '成功'),
                                      ('data', OrderedDict([('total', 1), ('next', None), ('previous', None),
                                                            ('results', [OrderedDict([('id', 1),
                                                                                      ('name', 'North')])])]))]))
class IssuesRESTTestCase(APITestCase, GetResponseMixin):
    """Filter, create, update and stats behaviour of the issues API."""
    # Fixtures provide the users, apps, tasks, issues and usage events that
    # the hard-coded expected counts in the test methods are written against.
    fixtures = [
        "apps/common/fixtures/tests/images.json",
        "apps/auth/fixtures/tests/departments.json",
        "apps/auth/fixtures/tests/users.json",
        "apps/user_group/fixtures/tests/user_groups.json",
        "apps/app/fixtures/tests/app_types.json",
        "apps/app/fixtures/tests/apps.json",
        "apps/app/fixtures/tests/app_components.json",
        "apps/task_manager/fixtures/tests/task_status.json",
        "apps/task_manager/fixtures/tests/info_api_test_graytask.json",
        "apps/task_manager/fixtures/tests/info_api_test_snapshotinnerstrategy.json",
        "apps/issue/fixtures/tests/report_sources.json",
        "apps/issue/fixtures/tests/issues.json",
        "apps/usage/fixtures/tests/usage_eventtype.json",
        "apps/usage/fixtures/tests/usage_eventtracking.json",
        "apps/usage/fixtures/tests/usage_property.json"
    ]
def test_filter_issues_by_contain_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'creator': 'normal_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'normal_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'admin_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'admin_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'app_owner_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'creator': 'app_owner_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'reportSource': 'weixin', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'reportSource': '四大区运营', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'reportSource': 'no_source', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_jira_id(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'jiraId': 'CC-157', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'jiraId': 'AAABBB', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'jiraId': 'AA-170', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_department(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'department': '网科集团', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '网科', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '质量管理部', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '质量', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '地产集团', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '不存在部门', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'department': '地产集团', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '地产', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '工程部', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '工', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '不存在部门', 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'priority': '一般', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'priority': '不紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 2)
def test_filter_issues_by_status_order(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='admin_user'))
# set app_owner_user as owner of app 6
url = reverse('usergroups-list')
data = {'type': UserGroupType.OWNER, 'appId': 6}
response = self.client.get(url, data, format='json')
group_id = response.data['data']['results'][0]['id']
url = reverse('usergroupmems-list', kwargs={'group_id': group_id})
data = {'account': 'app_owner_user'}
self.client.post(url, data, format='json')
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'statusNameOrder': '关闭,处理中,待处理,挂起,验证', 'appId': 6})
expect_order = ['关闭', '处理中', '待处理', '挂起', '验证']
# remove duplication
real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
self.assertEqual(expect_order, list(real_order))
response = self.client.get(reverse('issues-list'), {'statusNameOrder': '挂起,验证,关闭,处理中,待处理', 'appId': 6})
expect_order = ['挂起', '验证', '关闭', '处理中', '待处理']
real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
self.assertEqual(expect_order, list(real_order))
def test_filter_issues_by_score_and_createdTime_startDate_endDate(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2, 'createdTime': '2017-07-01'})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2, 'createdTime': '2017-06-29'})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 1, 'createdTime': '2017-07-01'})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'appId': 6, 'startDate': '2017-06-29', 'endDate': '2017-10-01'})
self.assertEqual(self._get_response_total(response), 7)
response = self.client.get(reverse('issues-list'), {'appId': 6, 'startDate': '2017-06-29', 'endDate': '2017-08-01'})
self.assertEqual(self._get_response_total(response), 5)
def test_filter_issues_by_multiple_score_value(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-list')
# appId 1
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
url_query = url + "?score=4&score=5&appId=1"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 2)
url_query = url + "?score=4&score=5&appId=1&score=300000000"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 2)
# appId 2
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
url_query = url + "?score=4&score=5&appId=2&score=300000000"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 1)
def test_create_issues_with_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['priority'], '紧急')
# no priority field
response = self.client.post(reverse('issues-list'),
{'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['priority'], '一般')
# check result in backend
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'priority': '一般', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
def test_create_issues_with_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['score'], '0')
# from weiXin: with reportSource and score field and reportSource field equal '四大区运营'
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb', 'reportSource': '四大区运营',
'score': '非常严重'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['score'], '5')
# from weiXin: with reportSource field and no score filed and reportSource field equal '四大区运营'
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb', 'reportSource': '四大区运营'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['score'], '4')
def test_create_issues_with_updated_after(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': '1987-01-01 10:13:20'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 2)
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': '2030-01-01 10:13:20'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 0)
date_time = '2017-06-29 20:25:00'
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': date_time})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 0)
# update 1 issue
issue = Issue.objects.get(pk=1)
issue.save()
# filter with same updated time again
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': date_time})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 1)
def test_update_issues_with_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 1})
response = self.client.patch(url, {'priority': '紧急'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['priority'], '紧急')
# check result in backend
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'priority': '不紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
def test_update_issues_operator_no_jira(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 2})
response = self.client.patch(url, {'operator': 'normal_user'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['operator'], 'manong')
def test_update_issues_operator_exist_jira(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 1})
response = self.client.patch(url, {'operator': 'normal_user'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['operator'], 'normal_user')
def test_issue_stats_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'creatorId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 7)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 9)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_report_source_and_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营', 'creatorId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 6)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_report_source_and_app(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营', 'appId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 1)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 1)
def test_issue_stats_report_source_and_task(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营', 'taskId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 8)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_test_filter_start_end_time(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'startTime': '2017-01-01'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 10)
data = {'startTime': '2017-10-01', 'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 2)
data = {'startTime': '2017-10-01', 'appId': 6, 'endTime': '2017-10-10'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 2)
data = {'startTime': '2017-09-01', 'appId': 6, 'endTime': '2017-09-30'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
data = {'startTime': '2017-11-01', 'appId': 6, 'endTime': '2017-11-30'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
def test_issue_stats_test_valid_issues(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
response = self.client.get(url, format='json')
self.assertEqual(response.data['data']['validIssues'], 4)
data = {'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['validIssues'], 1)
data = {'endTime': '2017-09-30'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['validIssues'], 3)
def test_issue_stats_test_filter_issue_from(self):
    """issueFrom filters stats by origin; unknown origins yield zero."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issuestats')
    data = {'issueFrom': 'local'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 1)
    data = {'issueFrom': 'remote'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 1)
    # An origin value not present in the fixtures matches nothing.
    data = {'issueFrom': 'fake_one'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 0)
    # Combined with appId: fixture issues of both origins belong to app 6.
    data = {'issueFrom': 'local', 'appId': 6}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 1)
    data = {'issueFrom': 'remote', 'appId': 6}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 1)
    data = {'issueFrom': 'local', 'appId': 1}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 0)
    data = {'issueFrom': 'remote', 'appId': 2}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['totalIssues'], 0)
def test_create_issues_with_extended_fields(self):
    """An issue can be created with values for extended fields defined on its task."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    # Define two extended fields for task 1 first.
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                                format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data['data']['extFields'], {'手机型号': '华为', '广场': '通州万达'})
def test_can_not_create_issues_with_undefined_extended_fields(self):
    """Creating an issue with an extended field never defined for the task must fail.

    The transport layer still answers HTTP 200; the failure is reported in the
    business-level envelope (status 400 plus an error message).
    """
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 # '场地' was never defined for task 1 (only '广场' was).
                                 'extFields': {'场地': '通州万达', '手机型号': '华为'}},
                                format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # Bug fix: the expected payload used to be a bare OrderedDict expression with
    # no assertion wrapped around it, so this check never actually ran.
    self.assertEqual(response.data['status'], 400)
    self.assertEqual(response.data['msg'],
                     'Not found: IssueExtField matching query does not exist.')
def test_can_not_create_issues_without_must_have_extended_fields(self):
    """Creating an issue that omits a mandatory extended field must fail.

    '手机型号' is defined with isOptional=False, so leaving it out of extFields
    is rejected at the business level (status 400) even though HTTP is 200.
    """
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': False, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达'}},
                                format='json')
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # Bug fix: the expected payload used to be a bare OrderedDict expression with
    # no assertion wrapped around it, so this check never actually ran.
    self.assertEqual(response.data['status'], 400)
    self.assertEqual(response.data['msg'], "缺少以下必须扩展字段: {'手机型号'}")
def test_update_issues_with_extended_fields(self):
    """PATCHing extFields replaces the whole set of extended-field values."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                                format='json')
    issue_id = response.data['data']['id']
    url = reverse('issues-detail', kwargs={'pk': issue_id})
    response = self.client.patch(url, {'extFields': {'手机型号': '苹果'}},
                                 format='json')
    # PATCH does a full replacement of the extended fields, so '广场' is gone.
    self.assertEqual(response.data['data']['extFields'], {'手机型号': '苹果'})
def test_get_issue_extended_field_value_from_model_obj(self):
    """Issue.get_ext_field_value returns the stored value for each extended field."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                                format='json')
    issue_id = response.data['data']['id']
    # Read back through the model API rather than the REST API.
    issue_obj = Issue.objects.get(id=issue_id)
    self.assertEqual('通州万达', issue_obj.get_ext_field_value('广场'))
    self.assertEqual('华为', issue_obj.get_ext_field_value('手机型号'))
def test_set_issue_extended_field_value_from_model_obj(self):
    """Issue.set_ext_field_value updates one field, leaves others alone, and
    returns False for an undefined field name."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                                format='json')
    issue_id = response.data['data']['id']
    issue_obj = Issue.objects.get(id=issue_id)
    self.assertTrue(issue_obj.set_ext_field_value('广场', '瞎写的广场'))
    self.assertEqual('瞎写的广场', issue_obj.get_ext_field_value('广场'))
    # Other fields are not affected by the update.
    self.assertEqual('华为', issue_obj.get_ext_field_value('手机型号'))
    self.assertTrue(issue_obj.set_ext_field_value('手机型号', '瞎写的手机型号'))
    self.assertEqual('瞎写的手机型号', issue_obj.get_ext_field_value('手机型号'))
    # Setting a field that was never defined for the task fails.
    self.assertFalse(issue_obj.set_ext_field_value('瞎写的字段', 'aaa'))
def test_delete_issue_will_delete_extended_fields(self):
    """Deleting an issue cascades to its IssueExtValue rows."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场'})
    self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                                format='json')
    issue_id = response.data['data']['id']
    url = reverse('issues-detail', kwargs={'pk': issue_id})
    response = self.client.delete(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
    # No extended-field values may survive the issue deletion.
    self.assertEqual(0, IssueExtValue.objects.filter(issue_id=issue_id).count())
def test_update_issues_will_check_extended_fields(self):
    """PATCH validates extended fields too: dropping a mandatory field is rejected."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    # '广场' is mandatory here (isOptional=False).
    self.client.post(url, {'name': '广场', 'isOptional': False})
    self.client.post(url, {'name': '手机型号', 'default': 'iPhone', 'type': 'string'})
    response = self.client.post(reverse('issues-list'),
                                {'priority': '紧急', 'appId': 1, 'taskId': 1,
                                 'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                                 'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                                format='json')
    issue_id = response.data['data']['id']
    url = reverse('issues-detail', kwargs={'pk': issue_id})
    # Full replacement that omits mandatory '广场' must fail at business level.
    response = self.client.patch(url, {'extFields': {'手机型号': '苹果'}},
                                 format='json')
    self.assertEqual(response.data['status'], status.HTTP_400_BAD_REQUEST)
def test_filter_issues_with_extended_fields(self):
    """Issues can be filtered by extended-field values, alone, combined with
    regular filters, combined with each other, and under pagination."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueextfields-list', kwargs={'task_id': 1})
    self.client.post(url, {'name': '广场', 'isOptional': False})
    self.client.post(url, {'name': '手机型号', 'default': 'iPhone', 'type': 'string'})
    # Three issues spanning two 广场 values and two 手机型号 values.
    self.client.post(reverse('issues-list'),
                     {'priority': '紧急', 'appId': 1, 'taskId': 1,
                      'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                      'extFields': {'广场': '通州万达', '手机型号': '华为'}},
                     format='json')
    self.client.post(reverse('issues-list'),
                     {'priority': '紧急', 'appId': 1, 'taskId': 1,
                      'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                      'extFields': {'广场': '通州万达', '手机型号': '苹果'}},
                     format='json')
    self.client.post(reverse('issues-list'),
                     {'priority': '紧急', 'appId': 1, 'taskId': 1,
                      'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
                      'extFields': {'广场': '大望路万达', '手机型号': '华为'}},
                     format='json')
    url = reverse('issues-list')
    # Extended-field filter combined with a regular filter (appId).
    data = {'广场': '大望路万达', 'appId': 1}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 1)
    data = {'广场': '大望路万达', 'appId': 6}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 0)
    data = {'广场': '通州万达', 'appId': 1}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 2)
    data = {'手机型号': '华为', 'appId': 1, 'taskId': 1}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 2)
    # Two extended-field filters are ANDed together.
    data = {'手机型号': '华为', '广场': '通州万达', 'appId': 1, 'taskId': 1}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 1)
    data = {'手机型号': '华为', '广场': '大望路万达'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 1)
    data = {'手机型号': '苹果', '广场': '大望路万达'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 0)
    # Extended-field filters alone, without any regular filter.
    data = {'广场': '大望路万达'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 1)
    data = {'手机型号': '华为'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 2)
    data = {'广场': '通州万达'}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 2)
    # check in pagination condition: total stays the same across pages.
    data = {'广场': '通州万达', 'pageSize': 1}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 2)
    data = {'广场': '通州万达', 'pageSize': 1, 'page': 2}
    response = self.client.get(url, data, format='json')
    self.assertEqual(response.data['data']['total'], 2)
def test_issue_component(self):
    """The issue detail payload exposes the component name of each fixture issue."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issues-detail', kwargs={'pk': 1})
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data['data']['componentName'], '技术支持')
    url = reverse('issues-detail', kwargs={'pk': 3})
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data['data']['componentName'], '飞凡众测')
def test_issue_operator_no_jira_link(self):
    """An issue without a linked JIRA has an empty jiraId but still an operator."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issues-detail', kwargs={'pk': 8})
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data['data']['jiraId'], '')
    self.assertEqual(response.data['data']['operator'], 'manong')
def test_issue_operator_exist_jira_link(self):
    """An issue with a linked JIRA exposes the JIRA key and its operator."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issues-detail', kwargs={'pk': 1})
    response = self.client.get(url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data['data']['jiraId'], 'CC-157')
    self.assertEqual(response.data['data']['operator'], 'mingong')
def mocked_zc_set_jira_status(*args, **kwargs):
    """Stub for zc_set_jira_status: always report a successful transition.

    Returns the new status name and a one-entry change log, ignoring all
    arguments.
    """
    new_status = '待处理'
    change_log = ['status changed!']
    return new_status, change_log
def mocked_jira_issue_is_avaliable(*args, **kwargs):
    """Stub for jira_issue_is_avaliable: every JIRA issue counts as reachable."""
    available = True
    return available
# This method will be used by the mock to replace requests.post
def mocked_requests_post(*args, **kwargs):
    """Stand-in for requests.post: canned JIRA-creation reply for JIRA_API_URL,
    404 for any other URL."""
    class MockResponse:
        # Mimics the subset of requests.Response used by the tests.
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code
            self.data = json.dumps(self.json_data)

        def json(self):
            return self.json_data

    target_url = args[0]
    if target_url != JIRA_API_URL:
        return MockResponse(None, 404)
    payload = {'data': {'status': '待处理', 'jiraId': 'AA-157', 'operator': 'dalingdao'},
               'status': 200}
    return MockResponse(payload, 200)
class IssuesJiraRESTTestCase(APITestCase, GetResponseMixin):
    """Integration tests for the issue <-> JIRA bridge endpoints.

    zc_set_jira_status, jira_issue_is_avaliable and requests.post are replaced
    by the module-level mocks above, so no real JIRA server is contacted.
    """

    # Fixture set shared by all tests in this case.
    fixtures = [
        "apps/common/fixtures/tests/images.json",
        "apps/auth/fixtures/tests/departments.json",
        "apps/auth/fixtures/tests/users.json",
        "apps/user_group/fixtures/tests/user_groups.json",
        "apps/app/fixtures/tests/app_types.json",
        "apps/app/fixtures/tests/apps.json",
        "apps/task_manager/fixtures/tests/task_status.json",
        "apps/task_manager/fixtures/tests/info_api_test_graytask.json",
        "apps/task_manager/fixtures/tests/info_api_test_snapshotinnerstrategy.json",
        "apps/issue/fixtures/tests/report_sources.json",
        "apps/issue/fixtures/tests/issues.json",
    ]

    def setUp(self):
        # Every request in this case runs as the app owner.
        self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))

    @mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
    @mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
    def test_update_jira_comment_with_empty_jira_info(self, mock_obj_1, mock_obj_2):
        """Commenting works when the issue has no prior JIRA info; newest comment first."""
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'fail', 'comment': 'first comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'fail', 'comment': 'second comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "second comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")

    @mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
    def test_update_jira_comment_with_no_jira(self, mock_obj):
        """Without a JIRA link the conclusion drives the local issue status directly."""
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 2}),
                                    {'conclusion': '验证不通过'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['status'], "处理中")
        issue_with_pk_2 = Issue.objects.get(pk=2)
        self.assertEqual(issue_with_pk_2.status.name, "处理中")
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 2}),
                                    {'conclusion': '验证通过'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['status'], "关闭")
        issue_with_pk_2 = Issue.objects.get(pk=2)
        self.assertEqual(issue_with_pk_2.status.name, "关闭")

    @mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
    @mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
    def test_update_jira_comment_with_jira_info_and_no_comments(self, mock_obj_1, mock_obj_2):
        """Commenting works when 'other' holds JIRA info but no comments list yet."""
        issue_with_pk_1 = Issue.objects.get(pk=1)
        issue_with_pk_1.other = """{"phoneBrand": "华为 p8", "area": "四大区", "业务模块": "不知道写啥"}"""
        issue_with_pk_1.save()
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'fail', 'comment': 'first comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'pass', 'comment': 'second comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "second comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")

    @mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
    @mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
    def test_update_jira_comment_with_jira_info_and_comments(self, mock_obj_1, mock_obj_2):
        """New comments are prepended when 'other' already contains a comments list."""
        issue_with_pk_1 = Issue.objects.get(pk=1)
        issue_with_pk_1.other = """{"phoneBrand": "华为 p8", "area": "四大区", "业务模块": "不知道写啥",
            "comments": [{"wanxin": "app_owner_user", "email": "app_owner_user@test.com",
            "name": "", "info": "presetting comment", "startTime": "", "endTime": ""}]}"""
        issue_with_pk_1.save()
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'fail', 'comment': 'first comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'pass', 'comment': 'second comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "second comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")

    @mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
    @mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
    def test_update_jira_status(self, mock_obj_1, mock_obj_2):
        """Posting a comment syncs the local status with the (mocked) JIRA status."""
        issue_with_pk_1 = Issue.objects.get(pk=1)
        issue_with_pk_1.status = IssueStatus.objects.get(name='验证')
        issue_with_pk_1.save()
        response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
                                    {'conclusion': 'fail', 'comment': 'first comment'})
        self.assertEqual(response.data['status'], 200)
        self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
        self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
        self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
        # mocked_zc_set_jira_status reports '待处理', which must be persisted.
        issue_with_pk_1.refresh_from_db()
        self.assertEqual(issue_with_pk_1.status.name, '待处理')

    @mock.patch('requests.post', side_effect=mocked_requests_post)
    def test_create_jira(self, mock_post):
        """issuetojira returns the existing JIRA key, or the one minted by the mock."""
        url = reverse('issuetojira')
        data = {'issueId': 1}
        response = self.client.get(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Issue 1 is already linked to CC-157 in the fixtures.
        self.assertEqual(response.data['data']['jiraId'], 'CC-157')
        data = {'issueId': 8}
        response = self.client.get(url, data, format='json')
        # Issue 8 has no link yet; the mocked JIRA API hands out AA-157.
        self.assertEqual(response.data['data']['jiraId'], 'AA-157')

    def test_jira_to_zc_jira_not_exist(self):
        """A webhook for an unknown JIRA key maps to no local issue."""
        url = reverse('jiratoissue')
        data = {
            "issue": {
                "key": "CC-15"
            },
            "user": {
                "name": "zhongce"
            }
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['data']['issueId'], None)
        self.assertEqual(response.data['data']['jiraId'], 'CC-15')

    def test_jira_to_zc_user_is_zhongce(self):
        """A webhook triggered by the platform's own JIRA user still resolves the issue."""
        url = reverse('jiratoissue')
        data = {
            "issue": {
                "key": "CC-157"
            },
            "user": {
                "name": JIRA_ZC_USER
            }
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['data']['issueId'], 1)
        self.assertEqual(response.data['data']['jiraId'], 'CC-157')

    def test_jira_to_zc_user_is_not_zhongce(self):
        """A webhook triggered by a regular JIRA user resolves the issue as well."""
        url = reverse('jiratoissue')
        data = {
            "issue": {
                "key": "CC-157"
            },
            "user": {
                "name": "zhaochunyan7"
            }
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['data']['issueId'], 1)
        self.assertEqual(response.data['data']['jiraId'], 'CC-157')

    def test_generate_change_log_jira_not_exist_update_priority(self):
        """Updating priority on an unlinked issue records a local change-log entry."""
        url = reverse('issues-detail', kwargs={'pk': 8})
        response = self.client.patch(url, {'priority': '紧急'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        zc_change_logs = json.loads(response.data['data']['zcChangeLogs'])
        # 'created' is a timestamp; drop it before comparing.
        zc_change_logs[0].pop('created')
        self.assertEqual(zc_change_logs, [{'wanxin': 'app_owner_user',
                                           'items': [{'field': 'priority', 'toString': '紧急', 'fromString': '不紧急'}],
                                           'author': ''}])

    def test_generate_change_log_jira_not_exist_update_images(self):
        """Updating images on an unlinked issue records a local change-log entry."""
        url = reverse('issues-detail', kwargs={'pk': 8})
        response = self.client.patch(url, {'images': ['aabbceadfdfdfdfdfdf']})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        zc_change_logs = json.loads(response.data['data']['zcChangeLogs'])
        zc_change_logs[0].pop('created')
        self.assertEqual(zc_change_logs, [{'wanxin': 'app_owner_user',
                                           'items': [{'field': 'images',
                                                      'toString': "['aabbceadfdfdfdfdfdf']",
                                                      'fromString': '[]'}],
                                           'author': ''}])

    @mock.patch('requests.post', side_effect=mocked_requests_post)
    def test_generate_change_log_create_jira(self, mock_post):
        """Creating a JIRA link writes a 'jira link' entry to the local change log."""
        url = reverse('issuetojira')
        data = {'issueId': 8}
        response = self.client.get(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['data']['jiraId'], 'AA-157')
        issue = Issue.objects.get(pk=8)
        zc_change_logs = json.loads(issue.zc_change_logs)
        zc_change_logs[0].pop('created')
        self.assertEqual(zc_change_logs, [{'author': '', 'wanxin': 'app_owner_user',
                                           'items': [{'fromString': '', 'toString': 'AA-157', 'field': 'jira link'}]}])
class IssuesLiteRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/common/fixtures/tests/images.json",
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/app/fixtures/tests/app_types.json",
"apps/app/fixtures/tests/apps.json",
"apps/task_manager/fixtures/tests/task_status.json",
"apps/task_manager/fixtures/tests/info_api_test_graytask.json",
"apps/task_manager/fixtures/tests/info_api_test_snapshotinnerstrategy.json",
"apps/issue/fixtures/tests/report_sources.json",
"apps/issue/fixtures/tests/issues.json",
]
def test_filter_issues_by_contain_creator(self):
    """The creator filter matches by substring, not exact username."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    response = self.client.get(reverse('issueslite-list'), {'creator': 'normal_user', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    # A prefix of the username matches the same issue.
    response = self.client.get(reverse('issueslite-list'), {'creator': 'normal_', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    response = self.client.get(reverse('issueslite-list'), {'creator': 'admin_user', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    response = self.client.get(reverse('issueslite-list'), {'creator': 'admin_', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    # app_owner_user created no issues for app 1 in the fixtures.
    response = self.client.get(reverse('issueslite-list'), {'creator': 'app_owner_user', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 0)
    response = self.client.get(reverse('issueslite-list'), {'creator': 'app_owner_', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_report_source(self):
    """The reportSource filter matches fixture sources; unknown sources yield zero."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    response = self.client.get(reverse('issueslite-list'), {'reportSource': 'weixin', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    response = self.client.get(reverse('issueslite-list'), {'reportSource': '四大区运营', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    response = self.client.get(reverse('issueslite-list'), {'reportSource': 'no_source', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_priority(self):
    """The priority filter returns only issues of the requested priority."""
    # NOTE(review): this queries 'issues-list' although the class tests the
    # lite endpoint elsewhere — confirm whether 'issueslite-list' was intended.
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 0)
    response = self.client.get(reverse('issues-list'), {'priority': '一般', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 0)
    response = self.client.get(reverse('issues-list'), {'priority': '不紧急', 'appId': 1})
    self.assertEqual(self._get_response_total(response), 2)
def test_filter_issues_by_status_order(self):
    """statusNameOrder sorts results by the caller-specified status sequence."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='admin_user'))
    # set app_owner_user as owner of app 6
    url = reverse('usergroups-list')
    data = {'type': UserGroupType.OWNER, 'appId': 6}
    response = self.client.get(url, data, format='json')
    group_id = response.data['data']['results'][0]['id']
    url = reverse('usergroupmems-list', kwargs={'group_id': group_id})
    data = {'account': 'app_owner_user'}
    self.client.post(url, data, format='json')
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    response = self.client.get(reverse('issueslite-list'),
                               {'statusNameOrder': '关闭,处理中,待处理,挂起,验证', 'appId': 6})
    expect_order = ['关闭', '处理中', '待处理', '挂起', '验证']
    # remove duplication while preserving order of first appearance
    real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
    self.assertEqual(expect_order, list(real_order))
    # A different requested order must be reflected in the results too.
    response = self.client.get(reverse('issueslite-list'),
                               {'statusNameOrder': '挂起,验证,关闭,处理中,待处理', 'appId': 6})
    expect_order = ['挂起', '验证', '关闭', '处理中', '待处理']
    real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
    self.assertEqual(expect_order, list(real_order))
def test_created_time_order_when_filter_issues_by_status_order_created_time(self):
    """Within each status group, results are ordered by creation time ascending."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='admin_user'))
    # set app_owner_user as owner of app 6
    url = reverse('usergroups-list')
    data = {'type': UserGroupType.OWNER, 'appId': 6}
    response = self.client.get(url, data, format='json')
    group_id = response.data['data']['results'][0]['id']
    url = reverse('usergroupmems-list', kwargs={'group_id': group_id})
    data = {'account': 'app_owner_user'}
    self.client.post(url, data, format='json')
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    response = self.client.get(reverse('issueslite-list'),
                               {'statusNameOrder': '关闭,处理中,待处理,挂起,验证', 'appId': 6})
    result = [(item['statusName'], item['createdAt']) for item in response.data['data']['results']]
    # Primary key: the requested status order; secondary key: createdAt ascending.
    self.assertEqual(result, [('关闭', '2017-06-29T18:25:11.681308'),
                              ('处理中', '2017-06-29T18:25:11.681308'),
                              ('待处理', '2017-06-29T18:25:11.681308'), ('待处理', '2017-10-01T18:22:11.681308'),
                              ('待处理', '2017-10-01T18:25:11.681308'),
                              ('挂起', '2017-06-29T18:25:11.681308'),
                              ('验证', '2017-06-29T18:25:11.681308')])
def test_filter_issues_by_multiple_score_value(self):
    """Repeated score query parameters are ORed together; unknown scores are ignored."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    url = reverse('issueslite-list')
    # appId 1
    response = self.client.get(reverse('issueslite-list'), {'score': 5, 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    response = self.client.get(reverse('issueslite-list'), {'score': 4, 'appId': 1})
    self.assertEqual(self._get_response_total(response), 1)
    # Multiple score values must be passed in the query string directly.
    url_query = url + "?score=4&score=5&appId=1"
    response = self.client.get(url_query)
    self.assertEqual(self._get_response_total(response), 2)
    # A score that matches nothing does not change the result.
    url_query = url + "?score=4&score=5&appId=1&score=300000000"
    response = self.client.get(url_query)
    self.assertEqual(self._get_response_total(response), 2)
    # appId 2
    response = self.client.get(reverse('issueslite-list'), {'score': 4, 'appId': 2})
    self.assertEqual(self._get_response_total(response), 1)
    response = self.client.get(reverse('issueslite-list'), {'score': 5, 'appId': 2})
    self.assertEqual(self._get_response_total(response), 0)
    url_query = url + "?score=4&score=5&appId=2&score=300000000"
    response = self.client.get(url_query)
    self.assertEqual(self._get_response_total(response), 1)
def test_issues_response(self):
    """The lite list endpoint returns the full envelope exactly as expected."""
    self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
    # appId 1 — a single fixture issue has score 5.
    response = self.client.get(reverse('issueslite-list'), {'score': 5, 'appId': 1})
    self.assertEqual(response.data,
                     OrderedDict([('status', 200),
                                  ('msg', '成功'),
                                  ('data', OrderedDict([('total', 1), ('next', None),
                                                        ('previous', None),
                                                        ('results',
                                                         [OrderedDict([('id', 1),
                                                                       ('jiraId', 'CC-157'),
                                                                       ('statusName', '待处理'), ('title', ''),
                                                                       ('createdAt', '2017-06-29T18:25:11.681308'),
                                                                       ('other', '{"phoneNumber":"15921372222","order":"12345678","phoneType":"P9","version":"0928gray","square":"通州万达","summary":"example全量数据","description":"example全量数据","occurrenceTime":"2017-09-01T09:01:00.000+0800","area":"ALL","phoneBrand":"华为","severity":"次要","businessType":"停车"}'), # noqa
                                                                       ('score', 5), ('remindKSTFlag', False),
                                                                       ('remindPlatFlag', False)])])]))]))
| apps/issue/testcases/integration/tests.py | 61,964 | set app_owner_user as owner of app 6 remove duplication appId 1 appId 2 no priority field check result in backend from weiXin: with reportSource and score field and reportSource field equal '四大区运营' from weiXin: with reportSource field and no score filed and reportSource field equal '四大区运营' update 1 issue filter with same updated time again check result in backend 不能传入未定义的字段 必须的字段一定要有 会全量更新扩展字段 不影响其他字段 check in pagination condition This method will be used by the mock to replace requests.post set app_owner_user as owner of app 6 remove duplication set app_owner_user as owner of app 6 appId 1 appId 2 appId 1 noqa | 618 | en | 0.717407 |
# encoding = utf-8
"""
//
// AzureMonitorAddonForSplunk
//
// Copyright (c) Microsoft Corporation
//
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the ""Software""), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is furnished
// to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
"""
import sys
from timewindow import put_time_window, put_time_checkpoint
from concurrent import futures
from subs import get_subscription_segment, get_resources, get_azure_environment, \
get_access_token, get_metrics_for_resources, get_secret_from_keyvault
MASK = '********'
def create_or_update_storage_password(self, props, logger):
    '''
    unencrypted password in inputs.conf, encrypt it and store as storagePassword

    Deletes any existing storage password with the same username first, then
    creates a fresh entry.  Failures in either step are logged (with the stage
    at which they occurred) but never raised.
    '''
    stage = 'reference'
    try:
        passwords = self.service.storage_passwords
        if props['username'] in passwords:
            # An entry already exists: remove it so create() cannot collide.
            stage = 'delete'
            passwords.delete(props['username'])
    except Exception as err:
        logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'\
            .format(err, stage))
    try:
        stage = 'create'
        self.service.storage_passwords.create(props['password'], props['username'])
    except Exception as err:
        logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'\
            .format(err, stage))
def mask_id_and_key(self, name, logger):
    '''
    Replace the SPN application id and key of the given modular input in
    inputs.conf with the mask string, leaving every other setting intact.

    name: full input name of the form '<kind>://<input_name>'.
    logger: callable(level, message) used for error reporting.
    '''
    kind, input_name = name.split('://')
    item = self.service.inputs.__getitem__((input_name, kind))
    try:
        # rebuild the full stanza, swapping only the two secrets for MASK
        masked = {
            'vaultName': item.content.vaultName,
            'SPNTenantID': item.content.SPNTenantID,
            'SPNApplicationId': MASK,
            'SPNApplicationKey': MASK,
            'SubscriptionId': item.content.SubscriptionId,
            'secretName': item.content.secretName,
            'secretVersion': item.content.secretVersion,
            'index': item.content.index,
            'interval': item.content.interval,
            'sourcetype': item.content.sourcetype,
        }
        item.update(**masked).refresh()
    except Exception as err:
        logger('ERROR', 'Error caught in mask_id_and_key: {0}'.format(err))
def get_or_store_secrets(self, inputs, logger):
    '''
    Either read the existing encrypted password or encrypt the clear text
    password found in inputs.conf and store it.
    Either way, return a dict of clear text credentials
    ({'app_id': ..., 'app_key': ...}).

    inputs: Splunk input definition; exactly one stanza is expected.
    logger: callable(level, message) used for error reporting.
    '''
    # FIX: iterkeys()/itervalues().next() are Python-2-only; next(iter(...))
    # works on both Python 2 and 3.
    input_name = next(iter(inputs.inputs))
    input_items = inputs.inputs[input_name]
    credentials = {}
    storage_passwords = self.service.storage_passwords
    # per-input storage password entry names (':' is not allowed in names)
    props_app_id = {}
    props_app_id['username'] = 'AzureMonitorMetricsAppID-{0}'.format(input_name.replace(':','_'))
    props_app_id['password'] = input_items.get("SPNApplicationId")
    props_app_key = {}
    props_app_key['username'] = 'AzureMonitorMetricsAppKey-{0}'.format(input_name.replace(':','_'))
    props_app_key['password'] = input_items.get("SPNApplicationKey")
    app_id = input_items.get("SPNApplicationId")
    app_key = input_items.get("SPNApplicationKey")
    if app_id is not None and app_key is not None:
        try:
            if ("AzureMonitorMetricsAppID" in storage_passwords) and (props_app_id['username'] not in storage_passwords):
                # Migrate legacy shared entry to a per-input entry for AzureMonitorMetricsAppID
                modify_storage_password(self, "AzureMonitorMetricsAppID", props_app_id['username'], logger)
            if ("AzureMonitorMetricsAppKey" in storage_passwords) and (props_app_key['username'] not in storage_passwords):
                # Migrate legacy shared entry to a per-input entry for AzureMonitorMetricsAppKey
                modify_storage_password(self, "AzureMonitorMetricsAppKey", props_app_key['username'], logger)
            if props_app_id['password'] == MASK:
                # inputs.conf already masked: real secrets live in storage_passwords
                app_id, app_key = get_app_id_and_key(self, props_app_id, props_app_key, logger)
            else:
                # first run with clear text secrets: encrypt them, then mask inputs.conf
                create_or_update_storage_password(self, props_app_id, logger)
                create_or_update_storage_password(self, props_app_key, logger)
                mask_id_and_key(self, input_name, logger)
        except Exception as e:
            logger('ERROR', 'Error caught in get_or_store_secrets: {0}'.format(e))
    credentials['app_id'] = app_id
    credentials['app_key'] = app_key
    return credentials
def get_app_id_and_key(self, props_app_id, props_app_key, logger):
    '''
    Get the encrypted app_id and app_key from storage_passwords and return
    them as clear text.

    props_app_id / props_app_key: dicts whose 'username' key names the
        storage password entries to read.
    logger: callable(level, message) used for error reporting.

    Raises KeyError if either entry is missing.
    '''
    storage_passwords = self.service.storage_passwords
    if props_app_id['username'] not in storage_passwords:
        raise KeyError('Did not find app_id {} in storage_passwords.'\
            .format(props_app_id['username']))
    if props_app_key['username'] not in storage_passwords:
        # FIX: this message previously said 'app_id', hiding which entry
        # was actually missing.
        raise KeyError('Did not find app_key {} in storage_passwords.'\
            .format(props_app_key['username']))
    app_id = ''
    app_key = ''
    try:
        app_id = storage_passwords[props_app_id['username']].clear_password
        app_key = storage_passwords[props_app_key['username']].clear_password
    except Exception as e:
        logger('ERROR', 'Error caught in get_app_id_and_key: {0}'.format(e))
    return app_id, app_key
def modify_storage_password(self, old_username, new_username, logger):
    '''
    Copy an existing storage password entry to a new username, preserving
    the stored clear-text secret. The old entry is left in place.
    '''
    logger('INFO', 'Updating storage password. Old username: {0}, new username: {1}'.format(old_username, new_username))
    vault = self.service.storage_passwords
    try:
        vault.create(vault[old_username].clear_password, new_username)
    except Exception as err:
        logger('ERROR', 'Error updating storage password in modify_storage_password: {0}'.format(err))
def get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict):
    """
    Fan the resource groups out over a small thread pool: fetch each
    group's resources, then collect metrics for those resources.
    """
    group_names = [group['name'] for group in resource_groups]
    with futures.ThreadPoolExecutor(max_workers=5) as executor:
        pending = {
            executor.submit(get_resources, ew, bearer_token, sub_url, group_name): group_name
            for group_name in group_names
        }
        for future in futures.as_completed(pending, None):
            group_name = pending[future]
            error = future.exception()
            if error is not None:
                ew.log('ERROR', 'Resource group {0} generated an exception: {1}'
                       .format(group_name, error))
            else:
                get_metrics_for_resources(ew, bearer_token,
                                          sub_url, group_name, future.result(),
                                          input_sourcetype, checkpoint_dict)
def get_metrics_for_subscription(inputs, credentials, ew):
    """
    Top level function.
    Given a subscription id and credentials, get metrics for all resources
    with the right tags.
    Splunk sends a dict of inputs with only one element, hence popitem().
    """
    metadata = inputs.metadata
    input_name, input_item = inputs.inputs.popitem()
    stanza = input_name.split('://')
    instance_name = stanza[1]
    # 'locale' tracks the last step reached so the handler below can report
    # where the failure happened.
    try:
        locale = "checkpoint file data"
        checkpoint_dir = metadata['checkpoint_dir']
        checkpoint_dict = {"checkpoint_dir":checkpoint_dir, "instance_name": instance_name}
        locale = "put_time_window"
        # update the time window for this iteration
        put_time_window(ew, checkpoint_dict)
        locale = "put_time_checkpoint"
        # and update the checkpoint for next time
        put_time_checkpoint(ew, checkpoint_dict)
        tenant_id = input_item.get("SPNTenantID")
        spn_client_id = credentials.get('app_id')
        spn_client_secret = credentials.get('app_key')
        subscription_id = input_item.get("SubscriptionId")
        key_vault_name = input_item.get("vaultName")
        secret_name = input_item.get("secretName")
        secret_version = input_item.get("secretVersion")
        input_sourcetype = input_item.get("sourcetype")
        arm_creds = {}
        if spn_client_id is not None and spn_client_secret is not None:
            # Authenticate against Key Vault with the input's SPN, then pull
            # the ARM credentials out of the vault.
            locale = "get_access_token for key vault SPN"
            authentication_endpoint = "https://login.windows.net/"
            resource = 'https://vault.azure.net'
            kv_bearer_token = get_access_token(
                tenant_id,
                spn_client_id,
                spn_client_secret,
                authentication_endpoint,
                resource)
            locale = "get_secret_from_keyvault"
            arm_creds = get_secret_from_keyvault(ew, kv_bearer_token,
                key_vault_name, secret_name, secret_version)
        locale = "get_access_token"
        authentication_endpoint = get_azure_environment(
            'Azure')['activeDirectoryEndpointUrl']
        resource = get_azure_environment(
            'Azure')['activeDirectoryResourceId']
        bearer_token = get_access_token(
            tenant_id,
            arm_creds.get('spn_client_id'),
            arm_creds.get('spn_client_secret'),
            authentication_endpoint,
            resource)
        locale = "get_azure_environment"
        resource_mgr_endpoint_url = get_azure_environment(
            'Azure')['resourceManagerEndpointUrl']
        locale = "get_subscription_segment"
        sub_url = resource_mgr_endpoint_url + \
            get_subscription_segment(subscription_id)
        locale = "get_resources"
        resource_groups = get_resources(ew, bearer_token, sub_url)
        locale = "get_resources_for_rgs"
        get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict)
    except Exception:
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. Log the failure and the step that was reached.
        ew.log('ERROR', 'Error caught in get_metrics_for_subscription, type: {0}, value: {1}, locale = {2}'
               .format(sys.exc_info()[0], sys.exc_info()[1], locale))
| bin/azure_monitor_metrics_main.py | 11,055 | unencrypted password in inputs.conf, encrypt it and store as storagePassword
get the encrypted app_id and app_key from storage_passwords
top level function
given subscription id and credentials, get metrics for all resources with the right tags
splunk sends an array of inputs, but only one element, hence the [0]
Either read existing encyrpted password or encrypt clear text password and store it
Either way, return a set of clear text credentials
map the resource groups to a function that gets resources
masks the app_id and app_key in inputs.conf
//
// AzureMonitorAddonForSplunk
//
// Copyright (c) Microsoft Corporation
//
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the ""Software""), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is furnished
// to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
encoding = utf-8 Create new unique storage password entry for AzureMonitorMetricsAppID based on input name Create new unique storage password entry for AzureMonitorMetricsAppKey based on input name update the time window for this iteration and update the checkpoint for next time | 2,027 | en | 0.730768 |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
from version import __version__
setup(
    name='opencensus-ext-datadog',
    version=__version__,  # noqa
    author='OpenCensus Authors',
    author_email='census-developers@googlegroups.com',
    classifiers=[
        # FIX: 'Intended Audience :: Developers' was listed twice.
        'Intended Audience :: Developers',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    description='OpenCensus Datadog exporter',
    include_package_data=True,
    install_requires=[
        'bitarray >= 1.0.1, < 2.0.0',
        'opencensus >= 0.9.dev0, < 1.0.0',
        'requests >= 2.19.0',
    ],
    extras_require={},
    license='Apache-2.0',
    packages=find_packages(exclude=(
        'examples',
        'tests',
    )),
    namespace_packages=[],
    url='https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-datadog',  # noqa: E501
    zip_safe=False,
)
| contrib/opencensus-ext-datadog/setup.py | 2,047 | Copyright 2019, OpenCensus Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. noqa noqa: E501 | 572 | en | 0.857359 |
from itertools import tee
import numpy as np
import scipy.interpolate as intp
from scipy.signal import savgol_filter
def get_edge_bin(array):
    """Detect the edge indices of a binary 1-D array.
    Args:
        array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary
            (0/1) or boolean (True/False) values.
    Returns:
        list: A list of ``(start, end)`` index pairs, one per non-zero block.
    Examples:
        .. code-block:: python
            >>> a = [0,1,1,0,0,0,1,0,1]
            >>> get_edge_bin(a)
            [(1, 3), (6, 7), (8, 9)]
            >>> b = [True, False, True, True, False, False]
            >>> get_edge_bin(b)
            [(0, 1), (2, 4)]
    """
    # pad with zeros on both sides so blocks touching the edges are closed
    padded = np.concatenate(([0], np.int64(array), [0]))
    # rising edges (+1) mark block starts, falling edges (-1) mark block ends
    step = padded - np.roll(padded, 1)
    starts = np.nonzero(step == 1)[0] - 1
    ends = np.nonzero(step == -1)[0] - 1
    return list(zip(starts, ends))
def get_local_minima(x, window=None):
    """Get the local minima of a 1d array in a window.
    Args:
        x (:class:`numpy.ndarray`): A list or Numpy 1d array.
        window (*int* or :class:`numpy.ndarray`): An odd integer or a list of
            odd integers as the lengthes of searching window.
    Returns:
        tuple: A tuple containing:
            * **index** (:class:`numpy.ndarray`): A numpy 1d array containing
              indices of all local minima.
            * **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing
              values of all local minima.
    Raises:
        ValueError: If *window* is neither an int nor an integer array, or
            contains an even value.
    """
    x = np.array(x)
    dif = np.diff(x)
    ind = dif > 0
    # a local minimum is where the sign of the difference flips to positive
    tmp = np.logical_xor(ind, np.roll(ind, 1))
    idx = np.logical_and(tmp, ind)
    index = np.where(idx)[0]
    if window is None:
        # no searching window: every local minimum qualifies
        return index, x[index]
    if isinstance(window, int):
        # a single window length applies to every point
        window = np.repeat(window, len(x))
    elif isinstance(window, np.ndarray):
        # FIX: accept any integer dtype (was a hard-coded int16/32/64 list
        # plus a bare print/raise); message-less ValueError replaced too.
        if not np.issubdtype(window.dtype, np.integer):
            raise ValueError('window array must contain integers')
    else:
        raise ValueError('window must be an int or a numpy integer array')
    if 0 in window % 2:
        raise ValueError('all window lengths must be odd')
    halfwin_lst = (window - 1) // 2
    index_lst = []
    for i in index:
        halfwin = halfwin_lst[i]
        i1 = max(0, i - halfwin)
        i2 = min(i + halfwin + 1, len(x))
        # keep the minimum only if it is the smallest value in its window
        if i == x[i1:i2].argmin() + i1:
            index_lst.append(i)
    if len(index_lst) > 0:
        index_lst = np.array(index_lst)
        return index_lst, x[index_lst]
    else:
        return np.array([]), np.array([])
def implete_none(lst):
    """Replace the None elemnets at the beginning and the end of list by auto
    increment integers.
    Convert the first and last few `None` elements to auto increment integers.
    These integers are determined by the first and last integers in the input
    array.
    While the `None` elements between two integers in the input list will
    remain.
    Args:
        lst (list): A list contaning None values.
    Returns:
        newlst (list): A list containing auto increment integers.
    Examples:
        .. code-block:: python
            >>> a = [None,None,3,4,None,5,6,None,None]
            >>> implete_none(a)
            [1, 2, 3, 4, None, 5, 6, 7, 8]
    """
    # FIX: the anchors are located by index, not by value equality.
    # The previous implementation scanned for elements *equal* to the
    # first/last non-None value, which picked the wrong anchor (and
    # corrupted output) when non-None values repeat, e.g. [None, 3, 3, None].
    notnone_idx = [i for i, v in enumerate(lst) if v is not None]
    first_idx = notnone_idx[0]
    last_idx = notnone_idx[-1]
    first_val = lst[first_idx]
    last_val = lst[last_idx]
    newlst = []
    for i, v in enumerate(lst):
        if i < first_idx:
            # count down from the first non-None value
            newlst.append(first_val - (first_idx - i))
        elif i > last_idx:
            # count up from the last non-None value
            newlst.append(last_val + (i - last_idx))
        else:
            # interior elements (including interior None) are kept
            newlst.append(v)
    return newlst
def derivative(*args, **kwargs):
    """Get the first derivative of data arrays (*x*, *y*).
    If **y** is not given, the first argument will be taken as **y**, and the
    differential of the input array will be returned.
    Args:
        x (list or :class:`numpy.ndarray`): X-values of the input array (optional).
        y (list or :class:`numpy.ndarray`): Y-values of the input array.
        points (int): Number of points used to calculate derivative
            (optional, default is 3).
    Returns:
        :class:`numpy.ndarray`: Derivative of the input array.
    """
    if len(args) == 1:
        y = np.array(args[0], dtype=np.float64)
        x = np.arange(y.size)
    elif len(args) == 2:
        x = np.array(args[0], dtype=np.float64)
        y = np.array(args[1], dtype=np.float64)
    else:
        raise ValueError
    if kwargs.pop('points', 3) != 3:
        # only the 3-point scheme is implemented
        raise ValueError
    # central differences in the interior (endpoints fixed up below)
    der = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))
    # one-sided 3-point formula at both ends
    coeff = np.array([-3., 4., -1.])
    der[0] = (coeff * y[:3]).sum() / (coeff * x[:3]).sum()
    rev = -coeff[::-1]
    der[-1] = (rev * y[-3:]).sum() / (rev * x[-3:]).sum()
    return der
def pairwise(array):
    """Iterate over consecutive overlapping pairs of an iterable array.
    Args:
        array (list or :class:`numpy.ndarray`): The input iterable array.
    Returns:
        :class:`zip`: zip objects.
    """
    first, second = tee(array)
    # advance the second iterator by one element; harmless on empty input
    next(second, None)
    return zip(first, second)
def smooth(array, points, deg):
    """Smooth an array with a Savitzky-Golay-style smoothing matrix.
    Args:
        array (:class:`numpy.ndarray`): Input array.
        points (int): Points of smoothing (only 5 is supported).
        deg (int): Degree of smoothing (2 or 3).
    Returns:
        :class:`numpy.ndarray`: smoothed array
    Raises:
        ValueError: For unsupported ``points``/``deg`` combinations
            (previously these fell through to a NameError).
    """
    n = array.size
    if points == 5:
        # w_2/w_1: boundary weights for the first/second rows (mirrored at
        # the end); w_0: interior 5-point weights.
        if deg == 2:
            w_2 = np.array([31., 9., -3., -5., 3.])/35.
            w_1 = np.array([ 9., 13., 12., 6., -5.])/35.
            w_0 = np.array([-3., 12., 17., 12., -3.])/35.
        elif deg == 3:
            w_2 = np.array([69., 4., -6., 4., -1.])/70.
            w_1 = np.array([ 2., 27., 12., -8., 2.])/35.
            w_0 = np.array([-3., 12., 17., 12., -3.])/35.
        else:
            raise ValueError('deg must be 2 or 3 when points == 5')
    else:
        raise ValueError('only points == 5 is supported')
    a = np.zeros((n, n))
    a[0, 0:5] = w_2
    a[1, 0:5] = w_1
    for i in np.arange(2, n-2):
        a[i, i-2:i+3] = w_0
    a[-2, -5:] = w_1[::-1]
    a[-1, -5:] = w_2[::-1]
    # FIX: plain matrix-vector product; np.matrix is deprecated in NumPy.
    return a.dot(array)
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10,
        upper_clip=None, lower_clip=None):
    """Smooth the input array with Savitzky-Golay filter with lower and/or
    upper clippings.
    Args:
        y (:class:`numpy.ndarray`): Input array.
        winlen (int): Window length of Savitzky-Golay filter.
        order (int): Order of Savitzky-Gaoly filter.
        maxiter (int): Maximum number of iterations.
        lower_clip (float): Lower sigma-clipping value.
        upper_clip (float): Upper sigma-clipping value.
    Returns:
        tuple: A tuple containing:
            * **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.
            * **yres** (:class:`numpy.ndarray`) – Residuals of y values.
            * **mask** (:class:`numpy.ndarray`) – Mask of y values.
            * **std** (float) – Standard deviation.
    """
    x = np.arange(y.size)
    # FIX: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
    mask = np.ones_like(y, dtype=bool)
    for ite in range(maxiter):
        # fill masked values in y using cubic spline interpolation
        f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3)
        ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order)
        yres = y - ysmooth
        std = yres[mask].std()
        # build the new mask by sigma-clipping the residuals
        new_mask = mask.copy()
        if lower_clip is not None:
            new_mask *= (yres > -lower_clip * std)
        if upper_clip is not None:
            new_mask *= (yres < upper_clip * std)
        if new_mask.sum() == mask.sum():
            # converged: no points were added or removed in this iteration
            break
        mask = new_mask
    return ysmooth, yres, mask, std
| gamse/utils/onedarray.py | 8,513 | Get the first derivative of data arrays (*x*, *y*).
If **y** is not given, the first argument will be taken as **y**, and the
differential of the input array will be returned.
Args:
x (list or :class:`numpy.ndarray`): X-values of the input array (optional).
y (list or :class:`numpy.ndarray`): Y-values of the input array.
points (int): Number of points used to calculate derivative
(optional, default is 3).
Returns:
:class:`numpy.ndarray`: Derivative of the input array.
Detect the edge indcies of a binary 1-D array.
Args:
array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary
(0/1) or boolean (True/False) values.
Returns:
list: A list containing starting and ending indices of the non-zero
blocks.
Examples:
.. code-block:: python
>>> a = [0,1,1,0,0,0,1,0,1]
>>> get_edge_bin(a)
[(1, 3), (6, 7), (8, 9)]
>>> b = [True, False, True, True, False, False]
>>> get_edge_bin(b)
[(0, 1), (2, 4)]
Get the local minima of a 1d array in a window.
Args:
x (:class:`numpy.ndarray`): A list or Numpy 1d array.
window (*int* or :class:`numpy.ndarray`): An odd integer or a list of
odd integers as the lengthes of searching window.
Returns:
tuple: A tuple containing:
* **index** (:class:`numpy.ndarray`): A numpy 1d array containing
indices of all local minima.
* **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing
values of all local minima.
Replace the None elemnets at the beginning and the end of list by auto
increment integers.
Convert the first and last few `None` elements to auto increment integers.
These integers are determined by the first and last integers in the input
array.
While the `None` elements between two integers in the input list will
remain.
Args:
lst (list): A list contaning None values.
Returns:
newlst (list): A list containing auto increment integers.
Examples:
.. code-block:: python
>>> a = [None,None,3,4,None,5,6,None,None]
>>> implete_none(a)
[1, 2, 3, 4, None, 5, 6, 7, 8]
Smooth the input array with Savitzky-Golay filter with lower and/or
upper clippings.
Args:
y (:class:`numpy.ndarray`): Input array.
winlen (int): Window length of Savitzky-Golay filter.
order (int): Order of Savitzky-Gaoly filter.
maxiter (int): Maximum number of iterations.
lower_clip (float): Lower sigma-clipping value.
upper_clip (float): Upper sigma-clipping value.
Returns:
tuple: A tuple containing:
* **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.
* **yres** (:class:`numpy.ndarray`) – Residuals of y values.
* **mask** (:class:`numpy.ndarray`) – Mask of y values.
* **std** (float) – Standard deviation.
Return pairwises of an iterable arrary.
Args:
array (list or :class:`numpy.ndarray`): The input iterable array.
Returns:
:class:`zip`: zip objects.
Smooth an array.
Args:
array (:class:`numpy.ndarray`): Input array.
points (int): Points of smoothing.
deg (int): Degree of smoothing.
Returns:
:class:`numpy.ndarray`: smoothed array
window is not given window is given window is an integer window is a numpy arrayif np.issubdtype(window.dtype, int): window are not integers not all of the windows are odd filter the None values first not-None element and its index last not-None element and its index fill masked values in y using interpolation generate new mask make a copy of existing mask give new mask with lower and upper clipping value | 3,600 | en | 0.607269 |
#!/usr/bin/env python
import argparse
import bs4
import os
import re
# better indentation hack via https://stackoverflow.com/a/15513483/127114
# Monkey-patch BeautifulSoup.prettify so the output indents each nesting
# level by `indent_width` spaces instead of the default single space.
orig_prettify = bs4.BeautifulSoup.prettify
r = re.compile(r'^(\s*)', re.MULTILINE)
def prettify(self, encoding=None, formatter="minimal", indent_width=3):
    # Repeat each line's captured leading whitespace `indent_width` times.
    return r.sub(r'\1' * indent_width, orig_prettify(self, encoding, formatter))
bs4.BeautifulSoup.prettify = prettify
def process_file(filepath):
    """
    Rewrite links in `filepath` as follows: /some/path/index.html --> /some/path/

    Also repoints <track> subtitle sources at the .vtt file matching the
    video's <source>. Non-HTML files are left untouched.
    """
    # print('processing', filepath)
    if not filepath.endswith('.html'):
        return
    # 1. read
    with open(filepath, 'r') as htmlfile:
        page = bs4.BeautifulSoup(htmlfile.read(), 'html.parser')
    # 2. rewrite folder links
    for anchor in page.find_all('a'):
        target = anchor['href']
        if target.endswith('index.html'):
            target = target.replace('index.html', '')
        anchor['href'] = target
    # 3. hack to rewrite subtitle links that wget doesn't handle correctly
    video = page.find('video')
    if video:
        main_file = video.find('source')['src']
        for track in video.find_all('track'):
            # point the track at the .vtt named after the video file
            track['src'] = main_file.replace('.mp4', '.vtt')
    # 4. write
    with open(filepath, 'w') as htmlfile:
        htmlfile.write(page.prettify())
def deindexify(webroot):
    """
    Walks directory structure starting at `webroot` and rewrites all folder links.

    Subtitle files named *_Subtitle.vtt are renamed to *_Low_Resolution.vtt
    so they match the video filenames; everything else goes through
    process_file().
    """
    # snapshot the walk so renames below cannot interfere with traversal
    for folder, _subfolders, filenames in list(os.walk(webroot)):
        for filename in filenames:
            filepath = os.path.join(folder, filename)
            if filepath.endswith('_Subtitle.vtt'):
                renamed = filepath.replace('_Subtitle.vtt', '_Low_Resolution.vtt')
                os.rename(filepath, renamed)
            else:
                process_file(filepath)
if __name__ == '__main__':
    # CLI entry point: rewrite folder links of a downloaded website in place.
    parser = argparse.ArgumentParser()
    parser.add_argument("webroot", help="Directory where website is stored.")
    args = parser.parse_args()
    deindexify(args.webroot)
    print('Removing index.html from folder links done.')
| scripts/deindexify.py | 2,583 | Walks directory stucutre starting at `webroot` and rewrites all folder links.
Rewrite links in `filepath` as follows: /some/path/index.html --> /some/path/
!/usr/bin/env python better indentation hack via https://stackoverflow.com/a/15513483/127114 print('processing', filepath) 1. read 2. rewrite links 3. hack to rewrite subtitle links that wget doesn't handle correctly track_src = track['src'] new_src = os.path.basename(track_src) 4. write print('processing folder ' + str(rel_path)) | 489 | en | 0.737923 |
"""
Based on REST Framework Parsers, optimized for csv
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
import codecs
from urllib import parse
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import ChunkIter
from django.http.multipartparser import \
MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header
from django.utils.encoding import force_str
from rest_framework import renderers
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.utils import json
class DataAndFiles:
    """Simple container pairing parsed request data with uploaded files."""

    def __init__(self, data, files):
        # parsed request body (may be empty for raw file uploads)
        self.data = data
        # mapping of upload field names to file objects
        self.files = files
class BaseParser:
    """
    All parsers should extend `BaseParser`, specifying a `media_type`
    attribute, and overriding the `.parse()` method.
    """
    # Media type this parser handles (e.g. 'application/json');
    # subclasses must override.
    media_type = None
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Given a stream to read from, return the parsed representation.
        Should return parsed data, or a `DataAndFiles` object consisting of the
        parsed data and files.
        """
        raise NotImplementedError(".parse() must be overridden.")
class MParser(BaseParser):
    """
    Parser for file upload data.

    Accepts any media type and streams the request body through Django's
    upload handlers, returning the upload as a single 'file' entry.
    """
    media_type = '*/*'
    errors = {
        'unhandled': 'FileUpload parse error - none of upload handlers can handle the stream',
        'no_filename': 'Missing filename. Request should include a Content-Disposition header with a filename parameter.',
    }
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Treats the incoming bytestream as a raw file upload and returns
        a `DataAndFiles` object.
        `.data` will be None (we expect request body to be a file content).
        `.files` will be a `QueryDict` containing one 'file' element.
        """
        parser_context = parser_context or {}
        request = parser_context['request']
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        meta = request.META
        upload_handlers = request.upload_handlers
        filename = self.get_filename(stream, media_type, parser_context)
        # Note that this code is extracted from Django's handling of
        # file uploads in MultiPartParser.
        content_type = meta.get('HTTP_CONTENT_TYPE',
                                meta.get('CONTENT_TYPE', ''))
        try:
            content_length = int(meta.get('HTTP_CONTENT_LENGTH',
                                          meta.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = None
        # See if the handler will want to take care of the parsing.
        for handler in upload_handlers:
            result = handler.handle_raw_input(stream,
                                              meta,
                                              content_length,
                                              None,
                                              encoding)
            if result is not None:
                # A handler consumed the whole stream itself;
                # result[1] is the resulting file object.
                return DataAndFiles({}, {'file': result[1]})
        # This is the standard case.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        chunk_size = min([2 ** 31 - 4] + possible_sizes)
        chunks = ChunkIter(stream, chunk_size)
        counters = [0] * len(upload_handlers)
        for index, handler in enumerate(upload_handlers):
            try:
                handler.new_file(None, filename, content_type,
                                 content_length, encoding)
            except StopFutureHandlers:
                # This handler takes over the upload; drop the rest.
                upload_handlers = upload_handlers[:index + 1]
                break
        for chunk in chunks:
            for index, handler in enumerate(upload_handlers):
                """
                Trimming HttpResponse encapsulation from parsed file stream
                """
                # NOTE(review): this assumes each chunk is wrapped in an
                # HTTP-response-style envelope (payload between the first
                # '\n\r\n' and the second-to-last '\r\n') — confirm against
                # the producer of the stream. Counters advance by the
                # UNtrimmed chunk length.
                chunk_length = len(chunk)
                start = chunk.find(bytes('\n\r\n','utf-8')) + 3
                end = chunk.rfind(bytes('\r\n','utf-8'))
                end = chunk[:end].rfind(bytes('\r\n','utf-8')) + 2
                chunk = handler.receive_data_chunk(chunk[start:end], counters[index])
                counters[index] += chunk_length
                if chunk is None:
                    # Handler consumed the chunk; don't pass it downstream.
                    break
        for index, handler in enumerate(upload_handlers):
            file_obj = handler.file_complete(counters[index])
            if file_obj is not None:
                return DataAndFiles({}, {'file': file_obj})
        raise ParseError(self.errors['unhandled'])
    def get_filename(self, stream, media_type, parser_context):
        """
        Detects the uploaded file name. First searches a 'filename' url kwarg.
        Then tries to parse Content-Disposition header.

        Returns None when no filename can be determined.
        """
        try:
            return parser_context['kwargs']['filename']
        except KeyError:
            pass
        try:
            meta = parser_context['request'].META
            disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode())
            filename_parm = disposition[1]
            if 'filename*' in filename_parm:
                # RFC 6266 extended (encoded) filename parameter present.
                return self.get_encoded_filename(filename_parm)
            return force_str(filename_parm['filename'])
        except (AttributeError, KeyError, ValueError):
            pass
    def get_encoded_filename(self, filename_parm):
        """
        Handle encoded filenames per RFC6266. See also:
        https://tools.ietf.org/html/rfc2231#section-4
        """
        encoded_filename = force_str(filename_parm['filename*'])
        try:
            # RFC 2231 form: <charset>'<language>'<percent-encoded name>
            charset, lang, filename = encoded_filename.split('\'', 2)
            filename = parse.unquote(filename)
        except (ValueError, LookupError):
            # Fall back to the plain 'filename' parameter.
            filename = force_str(filename_parm['filename'])
        return filename
| mparser.py | 6,183 | All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
Parser for file upload data.
Handle encoded filenames per RFC6266. See also:
https://tools.ietf.org/html/rfc2231#section-4
Detects the uploaded file name. First searches a 'filename' url kwarg.
Then tries to parse Content-Disposition header.
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
Treats the incoming bytestream as a raw file upload and returns
a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one 'file' element.
Based on REST Framework Parsers, optimized for csv
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
Note that this code is extracted from Django's handling of file uploads in MultiPartParser. See if the handler will want to take care of the parsing. This is the standard case. | 1,161 | en | 0.768776 |
# NOTE: auto-generated IronPython stub for the Revit API
# (Autodesk.Revit.DB.CurveByPoints). Signatures and docstrings mirror the
# .NET assembly; method bodies are placeholders and are never executed.
class CurveByPoints(CurveElement,IDisposable):
 """ A curve interpolating two or more points. """
 def Dispose(self):
  """ Dispose(self: Element,A_0: bool) """
  pass
 def getBoundingBox(self,*args):
  """ getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
  pass
 def GetPoints(self):
  """
  GetPoints(self: CurveByPoints) -> ReferencePointArray
  Get the sequence of points interpolated by this curve.
  """
  pass
 def GetVisibility(self):
  """
  GetVisibility(self: CurveByPoints) -> FamilyElementVisibility
  Gets the visibility.
  Returns: A copy of visibility settings for the curve.
  """
  pass
 def ReleaseUnmanagedResources(self,*args):
  """ ReleaseUnmanagedResources(self: Element,disposing: bool) """
  pass
 def setElementType(self,*args):
  """ setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
  pass
 def SetPoints(self,points):
  """
  SetPoints(self: CurveByPoints,points: ReferencePointArray)
  Change the sequence of points interpolated by this curve.
  points: An array of 2 or more ReferencePoints.
  """
  pass
 def SetVisibility(self,visibility):
  """
  SetVisibility(self: CurveByPoints,visibility: FamilyElementVisibility)
  Sets the visibility.
  """
  pass
 @staticmethod
 def SortPoints(arr):
  """
  SortPoints(arr: ReferencePointArray) -> bool
  Order a set of ReferencePoints in the same way Revit does
  when creating a
  curve from points.
  arr: An array of ReferencePoints. The array is reordered
  if sortPoints returns
  true,and is unchanged if
  sortPoints returns false.
  Returns: False if the least-squares method is unable to find a solution;
  true otherwise.
  """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 IsReferenceLine=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: IsReferenceLine(self: CurveByPoints) -> bool
Set: IsReferenceLine(self: CurveByPoints)=value
"""
 ReferenceType=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Indicates the type of reference.
Get: ReferenceType(self: CurveByPoints) -> ReferenceType
Set: ReferenceType(self: CurveByPoints)=value
"""
 SketchPlane=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Override the SketchPlane property of CurveElement.
Get: SketchPlane(self: CurveByPoints) -> SketchPlane
Set: SketchPlane(self: CurveByPoints)=value
"""
 Subcategory=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """The subcategory,or graphics style,of the CurveByPoints.
Get: Subcategory(self: CurveByPoints) -> GraphicsStyle
Set: Subcategory(self: CurveByPoints)=value
"""
 Visible=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Whether the point is visible when the family is loaded
into a project.
Get: Visible(self: CurveByPoints) -> bool
Set: Visible(self: CurveByPoints)=value
"""
| release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py | 3,530 | A curve interpolating two or more points.
Dispose(self: Element,A_0: bool)
GetPoints(self: CurveByPoints) -> ReferencePointArray
Get the sequence of points interpolated by this curve.
GetVisibility(self: CurveByPoints) -> FamilyElementVisibility
Gets the visibility.
Returns: A copy of visibility settings for the curve.
ReleaseUnmanagedResources(self: Element,disposing: bool)
SetPoints(self: CurveByPoints,points: ReferencePointArray)
Change the sequence of points interpolated by this curve.
points: An array of 2 or more ReferencePoints.
SetVisibility(self: CurveByPoints,visibility: FamilyElementVisibility)
Sets the visibility.
SortPoints(arr: ReferencePointArray) -> bool
Order a set of ReferencePoints in the same way Revit does
when creating a
curve from points.
arr: An array of ReferencePoints. The array is reordered
if sortPoints returns
true,and is unchanged if
sortPoints returns false.
Returns: False if the least-squares method is unable to find a solution;
true otherwise.
__enter__(self: IDisposable) -> object
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature
getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ
setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) | 1,506 | en | 0.671075 |
# -*- coding: UTF-8 -*-
"""Define transaction calendar"""
import calendar
import datetime
from collections import defaultdict
from utils import Exchange
class TransPeriod(object):
    """
    The period of exchange transaction time, e.g. start_time, end_time of a day.

    Both endpoints are ``datetime.time`` values on the same (implicit) day.

    Raises:
        ValueError: if ``end_time`` is not strictly later than ``start_time``.
    """
    def __init__(self, start_time, end_time):
        # Validate before assigning so a half-initialized period is never kept.
        if not end_time > start_time:
            raise ValueError('Time Error')
        self._start_time = start_time
        self._end_time = end_time

    @property
    def start_time(self):
        """Time at which the period begins."""
        return self._start_time

    @property
    def end_time(self):
        """Time at which the period ends."""
        return self._end_time

    def time_delta(self):
        """Return the length of the period as a ``datetime.timedelta``.

        Combines both times with the same dummy date and subtracts; unlike
        the previous hand-rolled hour/minute/second arithmetic this also
        accounts for the microsecond components of the endpoints.
        """
        anchor = datetime.date.min
        return (datetime.datetime.combine(anchor, self._end_time) -
                datetime.datetime.combine(anchor, self._start_time))
class TransCalendar(calendar.Calendar):
    """
    The Exchange Transaction Calendar.

    Constructor parameters:
        day_periods: list of instance of Period, start_time, end_time
        first_week_day: the first day of a week, e.g. calendar.SUNDAY
    """
    SH_2017 = {2017: [(2017, 1, 1), (2017, 1, 2), (2017, 1, 27), (2017, 1, 28),
                      (2017, 1, 29), (2017, 1, 30), (2017, 1, 31), (2017, 2, 1),
                      (2017, 2, 2), (2017, 4, 2), (2017, 4, 3), (2017, 4, 4),
                      (2017, 5, 1), (2017, 5, 28), (2017, 5, 29), (2017, 5, 30),
                      (2017, 10, 1), (2017, 10, 2), (2017, 10, 3), (2017, 10, 4),
                      (2017, 10, 5), (2017, 10, 6), (2017, 10, 7), (2017, 10, 8)]}
    Holidays_2017 = {Exchange.SH: SH_2017, Exchange.SZ: SH_2017}

    def __init__(self, ex, day_periods, first_week_day=calendar.SUNDAY):
        super(TransCalendar, self).__init__(firstweekday=first_week_day)
        self._exchange = ex
        self._day_periods = day_periods
        self._holidays = defaultdict(list)
        self.set_holiday(TransCalendar.Holidays_2017[self._exchange])

    def set_holiday(self, holidays):
        """Load holidays from {year: [(y, m, d), ...]} into date objects."""
        for year, day_tuples in holidays.items():
            self._holidays[year] = [datetime.date(*ymd) for ymd in day_tuples]

    def is_trans_day(self, dt):
        """A trading day is any weekday that is not a listed holiday."""
        day = dt.date()
        if day.weekday() in (calendar.SATURDAY, calendar.SUNDAY):
            return False
        return day not in self._holidays[dt.year]

    def is_trans_time(self, dt):
        """True when dt falls inside any configured trading period."""
        moment = dt.time()
        return any(period.start_time <= moment <= period.end_time
                   for period in self._day_periods)

    @staticmethod
    def next_trans_day(dt):
        # Placeholder: not implemented yet; currently echoes the input.
        return dt
| receivers/Calender.py | 2,863 | The Exchange Transaction Calendar.
Constructor parameters:
day_periods: list of instance of Period,start_time, end_time
first_week_day: the first day of a week, e.g. calendar.SUNDAY
The period of exchange transaction time, e.g. start_time, end_time of a day.
Define transaction calendar
-*- coding: UTF-8 -*- | 310 | en | 0.725078 |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imagenet val. annotated by ReaL labels (https://arxiv.org/abs/2006.07159)."""
import json
import os
import tarfile
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = '''\
This dataset contains ILSVRC-2012 (ImageNet) validation images augmented with a
new set of "Re-Assessed" (ReaL) labels from the "Are we done with ImageNet"
paper, see https://arxiv.org/abs/2006.07159. These labels are collected using
the enhanced protocol, resulting in multi-label and more accurate annotations.
Important note: about 3500 examples contain no label, these should be [excluded
from the averaging when computing the accuracy](https://github.com/google-research/reassessed-imagenet#numpy).
One possible way of doing this is with the following NumPy code:
```python
is_correct = [pred in real_labels[i] for i, pred in enumerate(predictions) if real_labels[i]]
real_accuracy = np.mean(is_correct)
```
'''
_CITATION = '''\
@article{beyer2020imagenet,
title={Are we done with ImageNet?},
author={Lucas Beyer and Olivier J. Henaff and Alexander Kolesnikov and Xiaohua Zhai and Aaron van den Oord},
journal={arXiv preprint arXiv:2002.05709},
year={2020}
}
@article{ILSVRC15,
Author={Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title={{ImageNet Large Scale Visual Recognition Challenge}},
Year={2015},
journal={International Journal of Computer Vision (IJCV)},
doi={10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}
}
'''
_VALIDATION_LABELS_FNAME = 'image_classification/imagenet2012_validation_labels.txt'
_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'
_REAL_LABELS_URL = 'https://raw.githubusercontent.com/google-research/reassessed-imagenet/master/real.json'
class Imagenet2012Real(tfds.core.GeneratorBasedBuilder):
  """ImageNet validation images with ReaL labels."""
  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release',
  }
  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  manual_dir should contain `ILSVRC2012_img_val.tar` file.
  You need to register on http://www.image-net.org/download-images in order
  to get the link to download the dataset.
  """
  def _info(self):
    """Returns the `DatasetInfo`: features, supervised keys and citation.

    Each example carries the image, its single original ILSVRC-2012 label,
    the (possibly empty) sequence of ReaL labels, and the file name.
    """
    names_file = tfds.core.tfds_path(_LABELS_FNAME)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            'image': tfds.features.Image(encoding_format='jpeg'),
            'original_label': tfds.features.ClassLabel(names_file=names_file),
            'real_label': tfds.features.Sequence(
                tfds.features.ClassLabel(names_file=names_file)),
            'file_name': tfds.features.Text(),
        }),
        supervised_keys=('image', 'real_label'),
        homepage='https://github.com/google-research/reassessed-imagenet',
        citation=_CITATION,
    )
  def _get_real_labels(self, dl_manager):
    """Downloads real.json and maps validation file name -> ReaL label list."""
    with tf.io.gfile.GFile(dl_manager.download(_REAL_LABELS_URL), 'r') as f:
      # ReaL labels are ordered in the lexicographical order.
      return {'ILSVRC2012_val_{:08}.JPEG'.format(i + 1): labels
              for i, labels in enumerate(json.load(f))}
  @staticmethod
  def _get_original_labels(val_path):
    """Returns labels for validation.
    Args:
      val_path: path to TAR file containing validation images. It is used to
        retrieve the name of pictures and associate them to labels.
    Returns:
      dict, mapping from image name (str) to label (str).
    """
    labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME))
    with tf.io.gfile.GFile(labels_path) as labels_f:
      # `splitlines` to remove trailing `\r` in Windows
      labels = labels_f.read().strip().splitlines()
    with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
      tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
      images = sorted(tar.getnames())
    # Label file and TAR members are assumed to correspond positionally once
    # the member names are sorted.
    return dict(zip(images, labels))
  def _split_generators(self, dl_manager):
    """Returns the single VALIDATION split, read from the manual TAR."""
    val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')
    if not tf.io.gfile.exists(val_path):
      raise AssertionError(
          'ImageNet requires manual download of the data. Please download '
          'the train and val set and place them into: {}'.format(val_path))
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs={
                'archive': dl_manager.iter_archive(val_path),
                'original_labels': self._get_original_labels(val_path),
                'real_labels': self._get_real_labels(dl_manager),
            },
        ),
    ]
  def _generate_examples(self, archive, original_labels, real_labels):
    """Yields (file_name, example_dict) for each image in the TAR archive."""
    for fname, fobj in archive:
      record = {
          'file_name': fname,
          'image': fobj,
          'original_label': original_labels[fname],
          'real_label': real_labels[fname],
      }
      yield fname, record
| tensorflow_datasets/image_classification/imagenet2012_real.py | 5,698 | ImageNet validation images with ReaL labels.
Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
Imagenet val. annotated by ReaL labels (https://arxiv.org/abs/2006.07159).
coding=utf-8 Copyright 2021 The TensorFlow Datasets Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ReaL labels are ordered in the lexicographical order. `splitlines` to remove trailing `\r` in Windows | 1,043 | en | 0.820072 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Dan Wendlandt, Nicira, Inc.
import mox
from quantum.agent.linux import ovs_lib, utils
from quantum.openstack.common import uuidutils
from quantum.tests import base
class OVS_Lib_Test(base.BaseTestCase):
    """
    A test suite to exercise the OVS libraries shared by Quantum agents.

    Note: these tests do not actually execute ovs-* utilities, and thus
    can run on any system. That does, however, limit their scope.
    """

    def setUp(self):
        super(OVS_Lib_Test, self).setUp()
        self.BR_NAME = "br-int"
        self.TO = "--timeout=2"
        self.mox = mox.Mox()
        self.root_helper = 'sudo'
        self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
        # All shell invocations funnel through utils.execute; stubbing it
        # means no ovs-* binary is ever actually run.
        self.mox.StubOutWithMock(utils, "execute")
        self.addCleanup(self.mox.UnsetStubs)

    def test_vifport(self):
        """create and stringify vif port, confirm no exceptions"""
        self.mox.ReplayAll()

        pname = "vif1.0"
        ofport = 5
        vif_id = uuidutils.generate_uuid()
        mac = "ca:fe:de:ad:be:ef"

        # test __init__
        port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
        self.assertEqual(port.port_name, pname)
        self.assertEqual(port.ofport, ofport)
        self.assertEqual(port.vif_id, vif_id)
        self.assertEqual(port.vif_mac, mac)
        self.assertEqual(port.switch.br_name, self.BR_NAME)

        # test __str__ (must not raise)
        str(port)
        self.mox.VerifyAll()

    def test_reset_bridge(self):
        """reset_bridge() deletes (if present) and re-adds the bridge."""
        utils.execute(["ovs-vsctl", self.TO, "--",
                       "--if-exists", "del-br", self.BR_NAME],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME],
                      root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.reset_bridge()
        self.mox.VerifyAll()

    def test_delete_port(self):
        """delete_port() issues an idempotent del-port."""
        pname = "tap5"
        utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists",
                       "del-port", self.BR_NAME, pname],
                      root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.delete_port(pname)
        self.mox.VerifyAll()

    def test_add_flow(self):
        """add_flow() renders each keyword set into an ovs-ofctl add-flow."""
        ofport = "99"
        vid = 4000
        lsw_id = 18
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,dl_src=ca:fe:de:ad:be:ef"
                       ",actions=strip_vlan,output:0"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=1,actions=normal"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,actions=drop"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,in_port=%s,actions=drop" % ofport],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=4,in_port=%s,dl_vlan=%s,"
                       "actions=strip_vlan,set_tunnel:%s,normal"
                       % (ofport, vid, lsw_id)],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=3,tun_id=%s,actions="
                       "mod_vlan_vid:%s,output:%s"
                       % (lsw_id, vid, ofport)], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef",
                         actions="strip_vlan,output:0")
        self.br.add_flow(priority=1, actions="normal")
        self.br.add_flow(priority=2, actions="drop")
        self.br.add_flow(priority=2, in_port=ofport, actions="drop")
        self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid,
                         actions="strip_vlan,set_tunnel:%s,normal" %
                         (lsw_id))
        self.br.add_flow(priority=3, tun_id=lsw_id,
                         actions="mod_vlan_vid:%s,output:%s" %
                         (vid, ofport))
        self.mox.VerifyAll()

    def test_get_port_ofport(self):
        """get_port_ofport() returns the Interface's ofport column."""
        pname = "tap99"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.get_port_ofport(pname), ofport)
        self.mox.VerifyAll()

    def test_get_datapath_id(self):
        """get_datapath_id() strips the surrounding quotes from the column."""
        datapath_id = '"0000b67f4fbcc149"'
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Bridge", self.BR_NAME, "datapath_id"],
                      root_helper=self.root_helper).AndReturn(datapath_id)
        self.mox.ReplayAll()
        self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
        self.mox.VerifyAll()

    def test_count_flows(self):
        utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME],
                      root_helper=self.root_helper).AndReturn('ignore'
                                                             '\nflow-1\n')
        self.mox.ReplayAll()

        # counts the number of flows as total lines of output - 2
        self.assertEqual(self.br.count_flows(), 1)
        self.mox.VerifyAll()

    def test_delete_flow(self):
        """delete_flows() maps each keyword onto an ovs-ofctl del-flows."""
        ofport = "5"
        lsw_id = 40
        vid = 39
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "in_port=" + ofport], root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "tun_id=%s" % lsw_id], root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "dl_vlan=%s" % vid], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.delete_flows(in_port=ofport)
        self.br.delete_flows(tun_id=lsw_id)
        self.br.delete_flows(dl_vlan=vid)
        self.mox.VerifyAll()

    def test_add_tunnel_port(self):
        """add_tunnel_port() creates a GRE port and returns its ofport."""
        pname = "tap99"
        ip = "9.9.9.9"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "add-port",
                       self.BR_NAME, pname], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "type=gre"], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:remote_ip=" + ip],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:in_key=flow"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:out_key=flow"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.add_tunnel_port(pname, ip), ofport)
        self.mox.VerifyAll()

    def test_add_patch_port(self):
        """add_patch_port() creates a patch port peered with another port."""
        pname = "tap99"
        peer = "bar10"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "add-port",
                       self.BR_NAME, pname], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "type=patch"], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set",
                       "Interface", pname, "options:peer=" + peer],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
        self.mox.VerifyAll()

    def _test_get_vif_ports(self, is_xen=False):
        """Common body for get_vif_ports(); Xen uses xs-vif-uuid + xe lookup."""
        pname = "tap99"
        ofport = "6"
        vif_id = uuidutils.generate_uuid()
        mac = "ca:fe:de:ad:be:ef"

        utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                      root_helper=self.root_helper).AndReturn("%s\n" % pname)

        if is_xen:
            external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
                            % (vif_id, mac))
        else:
            external_ids = ('{iface-id="%s", attached-mac="%s"}'
                            % (vif_id, mac))

        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "external_ids"],
                      root_helper=self.root_helper).AndReturn(external_ids)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        if is_xen:
            utils.execute(["xe", "vif-param-get", "param-name=other-config",
                           "param-key=nicira-iface-id", "uuid=" + vif_id],
                          root_helper=self.root_helper).AndReturn(vif_id)
        self.mox.ReplayAll()

        ports = self.br.get_vif_ports()
        self.assertEqual(1, len(ports))
        self.assertEqual(ports[0].port_name, pname)
        self.assertEqual(ports[0].ofport, ofport)
        self.assertEqual(ports[0].vif_id, vif_id)
        self.assertEqual(ports[0].vif_mac, mac)
        self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
        self.mox.VerifyAll()

    def test_get_vif_ports_nonxen(self):
        self._test_get_vif_ports(False)

    def test_get_vif_ports_xen(self):
        self._test_get_vif_ports(True)

    def test_clear_db_attribute(self):
        pname = "tap77"
        utils.execute(["ovs-vsctl", self.TO, "clear", "Port",
                       pname, "tag"], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.clear_db_attribute("Port", pname, "tag")
        self.mox.VerifyAll()

    def test_port_id_regex(self):
        """The re_id pattern extracts mac, iface-id, port name and ofport."""
        result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",'
                  ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",'
                  ' iface-status=active}\nname :'
                  ' "dhc5c1321a7-c7"\nofport : 2\n')
        match = self.br.re_id.search(result)
        vif_mac = match.group('vif_mac')
        vif_id = match.group('vif_id')
        port_name = match.group('port_name')
        ofport = int(match.group('ofport'))
        self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2')
        self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f')
        self.assertEqual(port_name, 'dhc5c1321a7-c7')
        self.assertEqual(ofport, 2)

    def test_iface_to_br(self):
        """get_bridge_for_iface() returns the bridge name on success."""
        iface = 'tap0'
        br = 'br-int'
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
                      root_helper=root_helper).AndReturn('br-int')
        self.mox.ReplayAll()
        self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
        self.mox.VerifyAll()

    # BUG FIX: this test used to be a second definition of test_iface_to_br,
    # which shadowed the success-path test above so it never ran. Renamed so
    # both tests are collected.
    def test_iface_to_br_handles_error(self):
        """get_bridge_for_iface() returns None when ovs-vsctl fails."""
        iface = 'tap0'
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
                      root_helper=root_helper).AndRaise(Exception)
        self.mox.ReplayAll()
        self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
        self.mox.VerifyAll()

    def test_delete_all_ports(self):
        self.mox.StubOutWithMock(self.br, 'get_port_name_list')
        self.br.get_port_name_list().AndReturn(['port1'])
        self.mox.StubOutWithMock(self.br, 'delete_port')
        self.br.delete_port('port1')
        self.mox.ReplayAll()
        self.br.delete_ports(all_ports=True)
        self.mox.VerifyAll()

    def test_delete_quantum_ports(self):
        """delete_ports(all_ports=False) removes only VIF ports."""
        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
                                'ca:fe:de:ad:be:ef', 'br')
        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
                                'ca:ee:de:ad:be:ef', 'br')
        self.mox.StubOutWithMock(self.br, 'get_vif_ports')
        self.br.get_vif_ports().AndReturn([port1, port2])
        self.mox.StubOutWithMock(self.br, 'delete_port')
        self.br.delete_port('tap1234')
        self.br.delete_port('tap5678')
        self.mox.ReplayAll()
        self.br.delete_ports(all_ports=False)
        self.mox.VerifyAll()

    def test_get_bridges(self):
        bridges = ['br-int', 'br-ex']
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "list-br"],
                      root_helper=root_helper).AndReturn('br-int\nbr-ex\n')
        self.mox.ReplayAll()
        self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
        self.mox.VerifyAll()
| quantum/tests/unit/openvswitch/test_ovs_lib.py | 14,175 | A test suite to excercise the OVS libraries shared by Quantum agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
create and stringify vif port, confirm no exceptions
vim: tabstop=4 shiftwidth=4 softtabstop=4 Copyright 2012, Nicira, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Dan Wendlandt, Nicira, Inc. test __init__ test __str__ counts the number of flows as total lines of output - 2 | 996 | en | 0.836857 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.