max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
setup.py
|
killionadmin/ILOscripts
| 0
|
6625851
|
<filename>setup.py<gh_stars>0
from setuptools import setup, find_packages

# Distribution metadata for the python-ilorest-library package.
# Sources live under src/ (see package_dir below).
setup(
    name='python-ilorest-library',
    version='1.6.0',
    description='iLO Rest Python Library',
    author='Hewlett Packard Enterprise',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Communications',
    ],
    keywords='HP Enterprise',
    url='https://github.com/HewlettPackard/python-ilorest-library',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=[
        'jsonpatch',
        'jsonpath_rw',
        'jsonpointer',
        'validictory',
        'urlparse2',
    ],
)
|
<filename>setup.py<gh_stars>0
from setuptools import setup, find_packages

# Packaging configuration for python-ilorest-library; package code is
# rooted at src/ via package_dir.
setup(
    name='python-ilorest-library',
    version='1.6.0',
    description='iLO Rest Python Library',
    author='Hewlett Packard Enterprise',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Communications',
    ],
    keywords='HP Enterprise',
    url='https://github.com/HewlettPackard/python-ilorest-library',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=[
        'jsonpatch',
        'jsonpath_rw',
        'jsonpointer',
        'validictory',
        'urlparse2',
    ],
)
|
none
| 1
| 1.509692
| 2
|
|
info/InfoMeta.py
|
ffov/node_hierarchy
| 0
|
6625852
|
import json


class InfoMeta(object):
    """Container for crawl results with JSON and CSV export helpers.

    ``resultNodes`` and ``resultGraph`` are populated externally after a
    crawl; each node is expected to expose a ``__jsonObject__`` dict and a
    ``__getPublicAddresses__()`` method.
    """

    def __init__(self):
        # Filled in externally; None means "no results collected yet".
        self.resultNodes = None
        self.resultGraph = None

    def __generateNodesJson__(self):
        """Return all result nodes as a list of JSON-serializable dicts.

        Returns an empty list when no nodes have been collected yet.
        """
        if self.resultNodes is None:
            return []
        return [node.__jsonObject__ for node in self.resultNodes]

    def __generateNodesCSV__(self):
        """Render all result nodes as a CSV string (one row per node).

        Missing optional fields (site code, autoupdater info, hardware
        model) are emitted as "none" instead of aborting the export.
        Returns an empty string when no nodes have been collected yet.
        """
        if self.resultNodes is None:
            return ''
        result = '"hostname","site","nodeid","ipv6addresses","status","lastseen","firstseen","autoupdater","branch","firmware","hardware"\n'
        for node in self.resultNodes:
            nodeData = node.__jsonObject__
            nodeinfo = nodeData['nodeinfo']
            result += '"' + nodeinfo['hostname'] + '",'
            # Site code is optional; fall back to "none" when absent.
            # (Previously a bare except; narrowed to the lookup failures.)
            try:
                result += '"' + nodeinfo['system']['site_code'] + '",'
            except (KeyError, TypeError):
                result += '"none",'
            result += '"' + nodeinfo['node_id'] + '","'
            # Comma-separated list of the node's public IPv6 addresses.
            result += ','.join(node.__getPublicAddresses__())
            result += '",'
            if nodeData['flags']['online']:
                result += '"online",'
            else:
                result += '"offline",'
            result += '"' + nodeData['lastseen'] + '","' + nodeData['firstseen'] + '",'
            # Autoupdater information may be missing on older firmware.
            try:
                if nodeinfo['software']['autoupdater']['enabled']:
                    result += '"enabled",'
                else:
                    result += '"disabled",'
            except (KeyError, TypeError):
                result += '"none",'
            try:
                result += '"' + nodeinfo['software']['autoupdater']['branch'] + '",'
            except (KeyError, TypeError):
                result += '"none",'
            # NOTE: firmware release is intentionally not guarded, matching
            # the original behavior (a missing release raises KeyError).
            result += '"' + nodeinfo['software']['firmware']['release'] + '",'
            try:
                result += '"' + nodeinfo['hardware']['model'] + '"'
            except (KeyError, TypeError):
                result += '"none"'
            result += '\n'
        return result

    def writeCSVtoFile(self, filename):
        """Write the CSV rendering of the nodes to *filename*."""
        with open(filename, 'w') as out:
            out.write(self.__generateNodesCSV__())

    def writeJsonToFile(self, filename):
        """Write the JSON rendering of the nodes to *filename*."""
        with open(filename, 'w') as out:
            out.write(json.dumps(self.__generateNodesJson__(), sort_keys=True, indent=4, ensure_ascii=False))
|
import json


class InfoMeta(object):
    """Container for crawl results with JSON and CSV export helpers.

    ``resultNodes`` and ``resultGraph`` are populated externally after a
    crawl; each node is expected to expose a ``__jsonObject__`` dict and a
    ``__getPublicAddresses__()`` method.
    """

    def __init__(self):
        # Filled in externally; None means "no results collected yet".
        self.resultNodes = None
        self.resultGraph = None

    def __generateNodesJson__(self):
        """Return all result nodes as a list of JSON-serializable dicts.

        Returns an empty list when no nodes have been collected yet.
        """
        if self.resultNodes is None:
            return []
        return [node.__jsonObject__ for node in self.resultNodes]

    def __generateNodesCSV__(self):
        """Render all result nodes as a CSV string (one row per node).

        Missing optional fields (site code, autoupdater info, hardware
        model) are emitted as "none" instead of aborting the export.
        Returns an empty string when no nodes have been collected yet.
        """
        if self.resultNodes is None:
            return ''
        result = '"hostname","site","nodeid","ipv6addresses","status","lastseen","firstseen","autoupdater","branch","firmware","hardware"\n'
        for node in self.resultNodes:
            nodeData = node.__jsonObject__
            nodeinfo = nodeData['nodeinfo']
            result += '"' + nodeinfo['hostname'] + '",'
            # Site code is optional; fall back to "none" when absent.
            # (Previously a bare except; narrowed to the lookup failures.)
            try:
                result += '"' + nodeinfo['system']['site_code'] + '",'
            except (KeyError, TypeError):
                result += '"none",'
            result += '"' + nodeinfo['node_id'] + '","'
            # Comma-separated list of the node's public IPv6 addresses.
            result += ','.join(node.__getPublicAddresses__())
            result += '",'
            if nodeData['flags']['online']:
                result += '"online",'
            else:
                result += '"offline",'
            result += '"' + nodeData['lastseen'] + '","' + nodeData['firstseen'] + '",'
            # Autoupdater information may be missing on older firmware.
            try:
                if nodeinfo['software']['autoupdater']['enabled']:
                    result += '"enabled",'
                else:
                    result += '"disabled",'
            except (KeyError, TypeError):
                result += '"none",'
            try:
                result += '"' + nodeinfo['software']['autoupdater']['branch'] + '",'
            except (KeyError, TypeError):
                result += '"none",'
            # NOTE: firmware release is intentionally not guarded, matching
            # the original behavior (a missing release raises KeyError).
            result += '"' + nodeinfo['software']['firmware']['release'] + '",'
            try:
                result += '"' + nodeinfo['hardware']['model'] + '"'
            except (KeyError, TypeError):
                result += '"none"'
            result += '\n'
        return result

    def writeCSVtoFile(self, filename):
        """Write the CSV rendering of the nodes to *filename*."""
        with open(filename, 'w') as out:
            out.write(self.__generateNodesCSV__())

    def writeJsonToFile(self, filename):
        """Write the JSON rendering of the nodes to *filename*."""
        with open(filename, 'w') as out:
            out.write(json.dumps(self.__generateNodesJson__(), sort_keys=True, indent=4, ensure_ascii=False))
|
en
| 0.426433
|
#add array of public IPv6 Addresses
| 2.909115
| 3
|
jet_hadron/event_gen/pythia6.py
|
raymondEhlers/alice-jet-hadron
| 1
|
6625853
|
<reponame>raymondEhlers/alice-jet-hadron
#!/usr/bin/env python
import numpy as np
import pyjet.utils
from typing import Any, Iterable, Optional, Tuple
from jet_hadron.event_gen import generator
# Structured dtype for one particle four-vector: (E, px, py, pz), all float64.
DTYPE_EP = np.dtype([("E", np.float64), ("px", np.float64), ("py", np.float64), ("pz", np.float64)])
class Pythia6(generator.Generator):
    """ PYTHIA 6 event generator.

    Defaults to the Perugia 2012 tune (tune number 370).

    Note:
        TPythia6 is a singleton class (even though this isn't called out super clearly in the docs).
        Each ``Initialize(...)`` call will go to the singleton instance.

    Args:
        sqrt_s: Center of momentum energy.
        random_seed: Random seed for the generator. Default: None, which will be totally random.
        tune_number: PYTHIA 6 tune number.
        pt_hard: Pt hard bin values.
    Attributes:
        generator: The event generator object.
        sqrt_s: The center of momentum energy in GeV.
        random_seed: Random seed for the generator.
        initialized: True if the generator has been initialized.
        pt_hard: Pt hard bin values.
    """
    def __init__(self, tune_number: int = 370,
                 pt_hard: Tuple[Optional[float], Optional[float]] = (None, None), *args: Any, **kwargs: Any):
        # Lazy load ROOT here in an attempt to enable multiprocessing.
        # Unfortunately, it still doesn't appear to work, but I'll leave it here in hopes
        # that it will work some day...
        import ROOT
        # Before anything else, ensure that the PYTHIA 6 libraries are loaded. Otherwise, it will crash.
        # LHAPDF is required to be loaded to use Perugia 2012.
        ROOT.gSystem.Load("liblhapdf")
        # These pythia libraries are commonly loaded by ALICE, so we emulate them.
        ROOT.gSystem.Load("libEGPythia6")
        ROOT.gSystem.Load("libpythia6_4_28")
        # Sadly, it appears that this is also required. Without it, `_pyr_` isn't defined. After a bit of digging,
        # it appears that this is related to the PYTHIA random number generator interface `PYR`. `AliPythiaRndm` is
        # where it's actually implemented (look for `pyr_`). This could probably be avoided if one builds PYTHIA 6
        # without relying on AliRoot to perform the build.
        ROOT.gSystem.Load("libAliPythia6")
        # Next, setup the base class
        super().__init__(
            ROOT.TPythia6(), *args, **kwargs,
        )
        # Store the other parameters.
        self.pt_hard = pt_hard
        self.tune_number = tune_number
        # The generator still has to be configured: callers must invoke setup()
        # before generating events.
        self.initialized = False

    def tune(self, tune_number: int) -> None:
        """ Set the Pythia tune.

        Raises:
            RuntimeError: If called after the generator was initialized
                (tunes can only be applied before ``Initialize``).
        """
        if self.initialized is False:
            self.generator.Pytune(tune_number)
        else:
            raise RuntimeError("Cannot change the tune after PYTHIA has been initialized.")

    def _customize_tune(self) -> None:
        """ Provide additional tune customization. Hook for subclasses. """
        ...

    def setup(self) -> bool:
        """ Setup the PYTHIA 6 generator.

        Returns:
            True if setup was successful.
        Raises:
            RuntimeError: If the generator was already initialized.
        """
        if self.initialized is True:
            # Fixed typo in the error message ("instsance" -> "instance").
            raise RuntimeError("This PYTHIA6 instance has already been initialized")
        # Basic setup
        # Pt hard. NOTE: a bound of exactly 0 is treated as "unset" by this
        # truthiness check (matches the original behavior).
        if self.pt_hard[0]:
            self.generator.SetCKIN(3, self.pt_hard[0])
        if self.pt_hard[1]:
            self.generator.SetCKIN(4, self.pt_hard[1])
        # Random seed
        self.generator.SetMRPY(1, self.random_seed)
        # Specify or otherwise customize the tune.
        if self.tune_number:
            self.tune(self.tune_number)
        # Customize the tune (perhaps further beyond the tune number) if desired
        self._customize_tune()
        # Finally, initialize PYTHIA for pp at the given sqrt(s)
        self.generator.Initialize("cms", "p", "p", self.sqrt_s)
        # Store that it's initialized to ensure that it isn't missed.
        self.initialized = True
        return self.initialized

    def _format_output(self) -> generator.Event:
        """ Convert the output from the generator into a format suitable for further processing.

        Args:
            None.
        Returns:
            Event level information and the input particles.
        """
        # Setup
        status_dtype = DTYPE_EP.descr + [("status_code", np.int32)]
        # Retrieve particles
        particles = self.generator.GetListOfParticles()
        n_particles = particles.GetEntries()
        particles_array = np.empty(n_particles, dtype = status_dtype)
        # Store the particles from pythia. Unfortunately, we have to loop here, so the performance probably
        # isn't going to be amazing.
        # The Pythia particles are 1 indexed, so we start at 1.
        # NOTE: output_index := pythia_index - 1, but we define both for convenience
        for output_index, pythia_index in enumerate(range(1, n_particles + 1)):
            # Format: E, px, py, pz, KS (status code)
            particles_array[output_index] = np.array(
                (
                    self.generator.GetP(pythia_index, 4),
                    self.generator.GetP(pythia_index, 1),
                    self.generator.GetP(pythia_index, 2),
                    self.generator.GetP(pythia_index, 3),
                    self.generator.GetK(pythia_index, 1),
                ),
                dtype = status_dtype,
            )
        # Filter out some particles
        # According to the PYTHIA manual: "The ground rule is that codes 1–10 correspond to currently
        # existing partons/particles, while larger codes contain partons/particles which no longer exist,
        # or other kinds of event information."
        filtered_array = particles_array[(particles_array["status_code"] != 0) & (particles_array["status_code"] <= 10)]
        # Convert from (E, px, py, pz) -> (pT, eta, phi, mass)
        filtered_array = pyjet.utils.ep2ptepm(filtered_array)
        # Determine event properties
        event_properties = generator.EventProperties(
            cross_section = self.generator.GetPARI(1),
            pt_hard = self.generator.GetVINT(47),
        )
        return event_properties, filtered_array

    def __call__(self, n_events: int) -> Iterable[generator.Event]:
        """ Generate events with Pythia 6.

        Args:
            n_events: Number of events to generate.
        Returns:
            Generator to provide the requested number of events.
        Raises:
            RuntimeError: If setup() has not been called yet.
            ValueError: If the configured pt hard bins disagree with PYTHIA's
                actual settings (a symptom of the TPythia6 singleton being
                re-initialized elsewhere).
        """
        # Validation
        if not self.initialized:
            raise RuntimeError("Pythia6 was not yet initialized.")
        if self.pt_hard[0] and self.pt_hard[0] != self.generator.GetCKIN(3):
            raise ValueError(
                f"Min pt hard bin not set properly in PYTHIA6! Specified: {self.pt_hard[0]},"
                f" PYTHIA value: {self.generator.GetCKIN(3)}"
            )
        if self.pt_hard[1] and self.pt_hard[1] != self.generator.GetCKIN(4):
            raise ValueError(
                f"Max pt hard bin not set properly in PYTHIA6! Specified: {self.pt_hard[1]},"
                f" PYTHIA value: {self.generator.GetCKIN(4)}"
            )
        for _ in range(n_events):
            # Call Pyevnt()
            self.generator.GenerateEvent()
            yield self._format_output()
|
#!/usr/bin/env python
import numpy as np
import pyjet.utils
from typing import Any, Iterable, Optional, Tuple
from jet_hadron.event_gen import generator
# Structured dtype for one particle four-vector: (E, px, py, pz), all float64.
DTYPE_EP = np.dtype([("E", np.float64), ("px", np.float64), ("py", np.float64), ("pz", np.float64)])
class Pythia6(generator.Generator):
    """ PYTHIA 6 event generator.

    Defaults to the Perugia 2012 tune (tune number 370).

    Note:
        TPythia6 is a singleton class (even though this isn't called out super clearly in the docs).
        Each ``Initialize(...)`` call will go to the singleton instance.

    Args:
        sqrt_s: Center of momentum energy.
        random_seed: Random seed for the generator. Default: None, which will be totally random.
        tune_number: PYTHIA 6 tune number.
        pt_hard: Pt hard bin values.
    Attributes:
        generator: The event generator object.
        sqrt_s: The center of momentum energy in GeV.
        random_seed: Random seed for the generator.
        initialized: True if the generator has been initialized.
        pt_hard: Pt hard bin values.
    """
    def __init__(self, tune_number: int = 370,
                 pt_hard: Tuple[Optional[float], Optional[float]] = (None, None), *args: Any, **kwargs: Any):
        # Lazy load ROOT here in an attempt to enable multiprocessing.
        # Unfortunately, it still doesn't appear to work, but I'll leave it here in hopes
        # that it will work some day...
        import ROOT
        # Before anything else, ensure that the PYTHIA 6 libraries are loaded. Otherwise, it will crash.
        # LHAPDF is required to be loaded to use Perugia 2012.
        ROOT.gSystem.Load("liblhapdf")
        # These pythia libraries are commonly loaded by ALICE, so we emulate them.
        ROOT.gSystem.Load("libEGPythia6")
        ROOT.gSystem.Load("libpythia6_4_28")
        # Sadly, it appears that this is also required. Without it, `_pyr_` isn't defined. After a bit of digging,
        # it appears that this is related to the PYTHIA random number generator interface `PYR`. `AliPythiaRndm` is
        # where it's actually implemented (look for `pyr_`). This could probably be avoided if one builds PYTHIA 6
        # without relying on AliRoot to perform the build.
        ROOT.gSystem.Load("libAliPythia6")
        # Next, setup the base class
        super().__init__(
            ROOT.TPythia6(), *args, **kwargs,
        )
        # Store the other parameters.
        self.pt_hard = pt_hard
        self.tune_number = tune_number
        # The generator still has to be configured: callers must invoke setup()
        # before generating events.
        self.initialized = False

    def tune(self, tune_number: int) -> None:
        """ Set the Pythia tune.

        Raises:
            RuntimeError: If called after the generator was initialized
                (tunes can only be applied before ``Initialize``).
        """
        if self.initialized is False:
            self.generator.Pytune(tune_number)
        else:
            raise RuntimeError("Cannot change the tune after PYTHIA has been initialized.")

    def _customize_tune(self) -> None:
        """ Provide additional tune customization. Hook for subclasses. """
        ...

    def setup(self) -> bool:
        """ Setup the PYTHIA 6 generator.

        Returns:
            True if setup was successful.
        Raises:
            RuntimeError: If the generator was already initialized.
        """
        if self.initialized is True:
            # Fixed typo in the error message ("instsance" -> "instance").
            raise RuntimeError("This PYTHIA6 instance has already been initialized")
        # Basic setup
        # Pt hard. NOTE: a bound of exactly 0 is treated as "unset" by this
        # truthiness check (matches the original behavior).
        if self.pt_hard[0]:
            self.generator.SetCKIN(3, self.pt_hard[0])
        if self.pt_hard[1]:
            self.generator.SetCKIN(4, self.pt_hard[1])
        # Random seed
        self.generator.SetMRPY(1, self.random_seed)
        # Specify or otherwise customize the tune.
        if self.tune_number:
            self.tune(self.tune_number)
        # Customize the tune (perhaps further beyond the tune number) if desired
        self._customize_tune()
        # Finally, initialize PYTHIA for pp at the given sqrt(s)
        self.generator.Initialize("cms", "p", "p", self.sqrt_s)
        # Store that it's initialized to ensure that it isn't missed.
        self.initialized = True
        return self.initialized

    def _format_output(self) -> generator.Event:
        """ Convert the output from the generator into a format suitable for further processing.

        Args:
            None.
        Returns:
            Event level information and the input particles.
        """
        # Setup
        status_dtype = DTYPE_EP.descr + [("status_code", np.int32)]
        # Retrieve particles
        particles = self.generator.GetListOfParticles()
        n_particles = particles.GetEntries()
        particles_array = np.empty(n_particles, dtype = status_dtype)
        # Store the particles from pythia. Unfortunately, we have to loop here, so the performance probably
        # isn't going to be amazing.
        # The Pythia particles are 1 indexed, so we start at 1.
        # NOTE: output_index := pythia_index - 1, but we define both for convenience
        for output_index, pythia_index in enumerate(range(1, n_particles + 1)):
            # Format: E, px, py, pz, KS (status code)
            particles_array[output_index] = np.array(
                (
                    self.generator.GetP(pythia_index, 4),
                    self.generator.GetP(pythia_index, 1),
                    self.generator.GetP(pythia_index, 2),
                    self.generator.GetP(pythia_index, 3),
                    self.generator.GetK(pythia_index, 1),
                ),
                dtype = status_dtype,
            )
        # Filter out some particles
        # According to the PYTHIA manual: "The ground rule is that codes 1–10 correspond to currently
        # existing partons/particles, while larger codes contain partons/particles which no longer exist,
        # or other kinds of event information."
        filtered_array = particles_array[(particles_array["status_code"] != 0) & (particles_array["status_code"] <= 10)]
        # Convert from (E, px, py, pz) -> (pT, eta, phi, mass)
        filtered_array = pyjet.utils.ep2ptepm(filtered_array)
        # Determine event properties
        event_properties = generator.EventProperties(
            cross_section = self.generator.GetPARI(1),
            pt_hard = self.generator.GetVINT(47),
        )
        return event_properties, filtered_array

    def __call__(self, n_events: int) -> Iterable[generator.Event]:
        """ Generate events with Pythia 6.

        Args:
            n_events: Number of events to generate.
        Returns:
            Generator to provide the requested number of events.
        Raises:
            RuntimeError: If setup() has not been called yet.
            ValueError: If the configured pt hard bins disagree with PYTHIA's
                actual settings (a symptom of the TPythia6 singleton being
                re-initialized elsewhere).
        """
        # Validation
        if not self.initialized:
            raise RuntimeError("Pythia6 was not yet initialized.")
        if self.pt_hard[0] and self.pt_hard[0] != self.generator.GetCKIN(3):
            raise ValueError(
                f"Min pt hard bin not set properly in PYTHIA6! Specified: {self.pt_hard[0]},"
                f" PYTHIA value: {self.generator.GetCKIN(3)}"
            )
        if self.pt_hard[1] and self.pt_hard[1] != self.generator.GetCKIN(4):
            raise ValueError(
                f"Max pt hard bin not set properly in PYTHIA6! Specified: {self.pt_hard[1]},"
                f" PYTHIA value: {self.generator.GetCKIN(4)}"
            )
        for _ in range(n_events):
            # Call Pyevnt()
            self.generator.GenerateEvent()
            yield self._format_output()
|
en
| 0.855444
|
#!/usr/bin/env python PYTHIA 6 event generator. Defaults to the Perugia 2012 tune (tune number 370). Note: TPythia6 is a singleton class (even though this isn't called out super clearly in the docs). Each ``Initialize(...)`` call will go to the singleton instance. Args: sqrt_s: Center of momentum energy. random_seed: Random seed for the generator. Default: None, which will be totally random. tune_number: PYTHIA 6 tune number. pt_hard: Pt hard bin values. Attributes: generator: The event generator object. sqrt_s: The center of momentum energy in GeV. random_seed: Random seed for the generator. initialized: True if the generator has been initialized. pt_hard: Pt hard bin values. # Lazy load ROOT here in an attempt to enable multiprocessing. # Unfortunately, it still doesn't appear to work, but I'll leave it here in hopes # that it will work some day... # Before anything else, ensure that the Pythia6 library. Otherwise, it will crash. # LHAPDF is required to be loaded to use Perguia 2012 # These pythia libraries are commonly loaded by ALICE, so we emulate them. # Sadly, it appears that this is also required. Without it, `_pyr_` isn't defined. After a bit of digging, # it appears that this is related to the PYTHIA random number generator interface `PYR`. `AliPythiaRndm` is # where it's actually implemented (look for `pyr_`). This could probably be avoided if one builds PYTHIA 6 # without relying on AliRoot to do perform build. # Next, setup the base class # Store the other parameters. # The setup the generator Set the Pythia tune. Provide additional tune customization. Setup the PYTHIA 6 generator. Returns: True if setup was successful. # Basic setup # Pt hard # Random seed # Specify or otherwise customize the tune. # Customize the tune (perhaps further beyond the tune number) if desired # Finally, initialize PYTHIA for pp at the given sqrt(s) # Store that it's initialized to ensure that it it isn't missed. 
Convert the output from the generator for into a format suitable for further processing. Args: None. Returns: Event level information and the input particles. # Setup # Retrieve particles # Store the particles from pythia. Unfortunately, we have to loop here, so the performance probably # isn't going to be amazing. # The Pythia particles are 1 indexed, so we start at 1. # NOTE: output_index := pythia_index - 1, but we define both for convenience # Format: E, px, py, py, KS (status code) # Filter out some particles # According to the PYTHIA manual: "The ground rule is that codes 1–10 correspond to currently # existing partons/particles, while larger codes contain partons/particles which no longer exist, # or other kinds of event information." # Convert from (E, px, py, pz) -> (pT, eta, phi, mass) # Determine event properties Generate an event with Pythia 6. Args: n_events: Number of events to generate. Returns: Generator to provide the requested number of events. # Validation # Call Pyevnt()
| 2.34183
| 2
|
pypowerbi/datasets.py
|
rouzbeh-afrasiabi/pypowerbi
| 0
|
6625854
|
<reponame>rouzbeh-afrasiabi/pypowerbi
# -*- coding: future_fstrings -*-
import requests
import simplejson as json
from requests.exceptions import HTTPError
from .dataset import *
class Datasets:
    """Wrapper around the Power BI REST API dataset operations.

    All methods accept an optional ``group_id`` (workspace); when it is
    omitted the call targets the user's personal workspace.
    """
    # url snippets used to assemble endpoint urls
    groups_snippet = 'groups'
    datasets_snippet = 'datasets'
    tables_snippet = 'tables'
    rows_snippet = 'rows'
    parameters_snippet = 'parameters'
    refreshes_snippet = 'refreshes'
    relationships_snippet = 'relationships'
    # json keys: list responses wrap their payload in a 'value' field
    get_datasets_value_key = 'value'

    def __init__(self, client):
        # client supplies the api root, version and auth header
        self.client = client
        self.base_url = f'{self.client.api_url}/{self.client.api_version_snippet}/{self.client.api_myorg_snippet}'

    def _groups_part(self, group_id):
        """Return the url fragment selecting the optional group.

        NOTE(review): the fragment ends with '/', and every caller appends
        another '/', producing a double slash in urls. This matches the
        original code and the API appears to tolerate it — confirm before
        changing.

        :param group_id: The optional group id (None for "My Workspace")
        :return: The url fragment, e.g. '/' or '/groups/{id}/'
        """
        if group_id is None:
            return '/'
        return f'/{self.groups_snippet}/{group_id}/'

    def count(self, group_id=None):
        """
        Evaluates the number of datasets
        :param group_id: The optional group id
        :return: The number of datasets as returned by the API
        """
        # Delegates to get_datasets, so each call performs a full API request.
        return len(self.get_datasets(group_id))

    def has_dataset(self, dataset_id, group_id=None):
        """
        Evaluates if the dataset exists
        :param dataset_id: The id of the dataset to evaluate
        :param group_id: The optional group id
        :return: True if the dataset exists, False otherwise
        """
        # Compare as strings so callers may pass non-string ids.
        datasets = self.get_datasets(group_id)
        for dataset in datasets:
            if dataset.id == str(dataset_id):
                return True
        return False

    def get_datasets(self, group_id=None):
        """
        Fetches all datasets
        https://msdn.microsoft.com/en-us/library/mt203567.aspx
        :param group_id: The optional group id to get datasets from
        :return: The list of the datasets found
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}'
        # form the headers
        headers = self.client.auth_header
        # get the response
        response = requests.get(url, headers=headers)
        # 200 is the only successful code, raise an exception on any other response code
        if response.status_code != 200:
            raise HTTPError(response, f'Get Datasets request returned http error: {response.json()}')
        return self.datasets_from_get_datasets_response(response)

    def get_dataset(self, dataset_id, group_id=None):
        """
        Gets a single dataset
        https://msdn.microsoft.com/en-us/library/mt784653.aspx
        :param dataset_id: The id of the dataset to get
        :param group_id: The optional id of the group to get the dataset from
        :return: The dataset returned by the API
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}'
        # form the headers
        headers = self.client.auth_header
        # get the response
        response = requests.get(url, headers=headers)
        # 200 is the only successful code, raise an exception on any other response code
        if response.status_code != 200:
            # Error message fixed to name this operation (was "Get Datasets").
            raise HTTPError(response, f'Get Dataset request returned http error: {response.json()}')
        return Dataset.from_dict(json.loads(response.text))

    def post_dataset(self, dataset, group_id=None):
        """
        Posts a single dataset
        https://msdn.microsoft.com/en-us/library/mt203562.aspx
        :param dataset: The dataset to push
        :param group_id: The optional group id to push the dataset to
        :return: The pushed dataset as returned by the API
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}'
        # form the headers
        headers = self.client.auth_header
        # form the json dict
        json_dict = DatasetEncoder().default(dataset)
        # get the response
        response = requests.post(url, headers=headers, json=json_dict)
        # 201 - Created. The request was fulfilled and a new Dataset was created.
        if response.status_code != 201:
            raise HTTPError(response, f'Post Datasets request returned http code: {response.json()}')
        return Dataset.from_dict(json.loads(response.text))

    def delete_dataset(self, dataset_id, group_id=None):
        """
        Deletes a dataset
        :param dataset_id: The id of the dataset to delete
        :param group_id: The optional group id to delete the dataset from
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}'
        # form the headers
        headers = self.client.auth_header
        # get the response
        response = requests.delete(url, headers=headers)
        # 200 is the only successful code
        if response.status_code != 200:
            raise HTTPError(response, f'Delete Dataset request returned http error: {response.json()}')

    def delete_all_datasets(self, group_id=None):
        """
        Deletes all datasets
        :param group_id: The optional group id of the group to delete all datasets from
        """
        # get all the datasets and delete each one
        datasets = self.get_datasets(group_id)
        for dataset in datasets:
            # BUG FIX: arguments were previously swapped
            # (self.delete_dataset(group_id, dataset.id)), which passed the
            # group id as the dataset id.
            self.delete_dataset(dataset.id, group_id)

    def get_tables(self, dataset_id, group_id=None):
        """
        Gets tables from a dataset
        https://msdn.microsoft.com/en-us/library/mt203556.aspx
        :param dataset_id: The id of the dataset which to get tables from
        :param group_id: The optional id of the group which to get tables from
        :return: A list of tables from the given group and dataset
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}/{self.tables_snippet}/'
        # form the headers
        headers = self.client.auth_header
        # get the response
        response = requests.get(url, headers=headers)
        # 200 is the only successful code, raise an exception on any other response code
        if response.status_code != 200:
            # Error message fixed to name this operation (was "Get Datasets").
            raise HTTPError(response, f'Get Tables request returned http error: {response.json()}')
        return self.tables_from_get_tables_response(response)

    def post_rows(self, dataset_id, table_name, rows, group_id=None):
        """
        Posts rows to a table in a given dataset
        https://msdn.microsoft.com/en-us/library/mt203561.aspx
        :param dataset_id: The id of the dataset to post rows to
        :param table_name: The name of the table to post rows to
        :param rows: The rows to post to the table
        :param group_id: The optional id of the group to post rows to
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}/' \
              f'{self.tables_snippet}/{table_name}/{self.rows_snippet}'
        # form the headers
        headers = self.client.auth_header
        # form the json dict
        row_encoder = RowEncoder()
        json_dict = {
            'rows': [row_encoder.default(x) for x in rows]
        }
        # get the response
        response = requests.post(url, headers=headers, json=json_dict)
        # 200 is the only successful code
        if response.status_code != 200:
            raise HTTPError(response, f'Post row request returned http error: {response.json()}')

    def delete_rows(self, dataset_id, table_name, group_id=None):
        """
        Deletes all rows from a table in a given dataset
        https://msdn.microsoft.com/en-us/library/mt238041.aspx
        :param dataset_id: The id of the dataset to delete the rows from
        :param table_name: The name of the table to delete the rows from
        :param group_id: The optional id of the group to delete the rows from
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}/' \
              f'{self.tables_snippet}/{table_name}/{self.rows_snippet}'
        # form the headers
        headers = self.client.auth_header
        # get the response
        response = requests.delete(url, headers=headers)
        # 200 is the only successful code
        if response.status_code != 200:
            # Error message fixed to name this operation (was "Post row").
            raise HTTPError(response, f'Delete rows request returned http error: {response.json()}')

    def get_dataset_parameters(self, dataset_id, group_id=None):
        """
        Gets all parameters for a single dataset
        https://msdn.microsoft.com/en-us/library/mt784653.aspx
        :param dataset_id: The id of the dataset from which you want the parameters
        :param group_id: The optional id of the group to get the dataset's parameters
        :return: The dataset parameters returned by the API
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}/{self.parameters_snippet}'
        # form the headers
        headers = self.client.auth_header
        # get the response
        response = requests.get(url, headers=headers)
        # 200 is the only successful code, raise an exception on any other response code
        if response.status_code != 200:
            raise HTTPError(response, f'Get Dataset parameters request returned http error: {response.json()}')
        return json.loads(response.text)

    def refresh_dataset(self, dataset_id, notify_option=None, group_id=None):
        """
        Refreshes a single dataset
        :param dataset_id: The id of the dataset to refresh
        :param notify_option: The optional notify_option to add in the request body
        :param group_id: The optional id of the group
        """
        # form the url
        url = f'{self.base_url}{self._groups_part(group_id)}/{self.datasets_snippet}/{dataset_id}/{self.refreshes_snippet}'
        # form the headers
        headers = self.client.auth_header
        if notify_option is not None:
            json_dict = {
                'notifyOption': notify_option
            }
        else:
            json_dict = None
        # get the response
        response = requests.post(url, headers=headers, json=json_dict)
        # 202 (Accepted) is the only successful code for a refresh request
        # (comment previously said 200, which contradicted the check below).
        if response.status_code != 202:
            raise HTTPError(response, f'Refresh dataset request returned http error: {response.json()}')

    @classmethod
    def datasets_from_get_datasets_response(cls, response):
        """
        Creates a list of datasets from a http response object
        :param response: The http response object
        :return: A list of datasets created from the given http response object
        """
        # load the response into a dict
        response_dict = json.loads(response.text)
        # go through entries returned from API
        return [Dataset.from_dict(entry) for entry in response_dict[cls.get_datasets_value_key]]

    @classmethod
    def tables_from_get_tables_response(cls, response):
        """
        Creates a list of tables from a http response object
        :param response: The http response object
        :return: A list of tables created from the given http response object
        """
        # load the response into a dict
        response_dict = json.loads(response.text)
        # go through entries returned from API
        return [Table.from_dict(entry) for entry in response_dict[cls.get_datasets_value_key]]
|
# -*- coding: future_fstrings -*-
import requests
import simplejson as json
from requests.exceptions import HTTPError
from .dataset import *
class Datasets:
    """Wrapper around the Power BI REST API dataset operations."""
    # url snippets used to assemble endpoint urls
    groups_snippet = 'groups'
    datasets_snippet = 'datasets'
    tables_snippet = 'tables'
    rows_snippet = 'rows'
    parameters_snippet = 'parameters'
    refreshes_snippet = 'refreshes'
    relationships_snippet = 'relationships'
    # json keys: list responses wrap their payload in a 'value' field
    get_datasets_value_key = 'value'
def __init__(self, client):
    """Store the api client and precompute the versioned service base url."""
    self.client = client
    self.base_url = f'{self.client.api_url}/{self.client.api_version_snippet}/{self.client.api_myorg_snippet}'
def count(self, group_id=None):
    """
    Evaluates the number of datasets
    :param group_id: The optional group id
    :return: The number of datasets as returned by the API
    """
    # Delegates to get_datasets, so each call performs a full API request.
    return len(self.get_datasets(group_id))
def has_dataset(self, dataset_id, group_id=None):
"""
Evaluates if the dataset exists
:param dataset_id: The id of the dataset to evaluate
:param group_id: The optional group id
:return: True if the dataset exists, False otherwise
"""
datasets = self.get_datasets(group_id)
for dataset in datasets:
if dataset.id == str(dataset_id):
return True
return False
def get_datasets(self, group_id=None):
"""
Fetches all datasets
https://msdn.microsoft.com/en-us/library/mt203567.aspx
:param group_id: The optional group id to get datasets from
:return: The list of the datasets found
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}'
# form the headers
headers = self.client.auth_header
# get the response
response = requests.get(url, headers=headers)
# 200 is the only successful code, raise an exception on any other response code
if response.status_code != 200:
raise HTTPError(response, f'Get Datasets request returned http error: {response.json()}')
return self.datasets_from_get_datasets_response(response)
def get_dataset(self, dataset_id, group_id=None):
"""
Gets a single dataset
https://msdn.microsoft.com/en-us/library/mt784653.aspx
:param dataset_id: The id of the dataset to get
:param group_id: The optional id of the group to get the dataset from
:return: The dataset returned by the API
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}'
# form the headers
headers = self.client.auth_header
# get the response
response = requests.get(url, headers=headers)
# 200 is the only successful code, raise an exception on any other response code
if response.status_code != 200:
raise HTTPError(response, f'Get Datasets request returned http error: {response.json()}')
return Dataset.from_dict(json.loads(response.text))
def post_dataset(self, dataset, group_id=None):
"""
Posts a single dataset
https://msdn.microsoft.com/en-us/library/mt203562.aspx
:param dataset: The dataset to push
:param group_id: The optional group id to push the dataset to
:return: The pushed dataset as returned by the API
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}'
# form the headers
headers = self.client.auth_header
# form the json dict
json_dict = DatasetEncoder().default(dataset)
# get the response
response = requests.post(url, headers=headers, json=json_dict)
# 201 - Created. The request was fulfilled and a new Dataset was created.
if response.status_code != 201:
raise HTTPError(response, f'Post Datasets request returned http code: {response.json()}')
return Dataset.from_dict(json.loads(response.text))
def delete_dataset(self, dataset_id, group_id=None):
"""
Deletes a dataset
:param dataset_id: The id of the dataset to delete
:param group_id: The optional group id to delete the dataset from
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}'
# form the headers
headers = self.client.auth_header
# get the response
response = requests.delete(url, headers=headers)
# 200 is the only successful code
if response.status_code != 200:
raise HTTPError(response, f'Delete Dataset request returned http error: {response.json()}')
def delete_all_datasets(self, group_id=None):
"""
Deletes all datasets
:param group_id: The optional group id of the group to delete all datasets from
"""
# get all the datasets and delete each one
datasets = self.get_datasets(group_id)
for dataset in datasets:
self.delete_dataset(group_id, dataset.id)
def get_tables(self, dataset_id, group_id=None):
"""
Gets tables from a dataset
https://msdn.microsoft.com/en-us/library/mt203556.aspx
:param dataset_id: The id of the dataset which to get tables from
:param group_id: The optional id of the group which to get tables from
:return: A list of tables from the given group and dataset
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}/{self.tables_snippet}/'
# form the headers
headers = self.client.auth_header
# get the response
response = requests.get(url, headers=headers)
# 200 is the only successful code, raise an exception on any other response code
if response.status_code != 200:
raise HTTPError(response, f'Get Datasets request returned http error: {response.json()}')
return self.tables_from_get_tables_response(response)
def post_rows(self, dataset_id, table_name, rows, group_id=None):
"""
Posts rows to a table in a given dataset
https://msdn.microsoft.com/en-us/library/mt203561.aspx
:param dataset_id: The id of the dataset to post rows to
:param table_name: The name of the table to post rows to
:param rows: The rows to post to the table
:param group_id: The optional id of the group to post rows to
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}/' \
f'{self.tables_snippet}/{table_name}/{self.rows_snippet}'
# form the headers
headers = self.client.auth_header
# form the json dict
row_encoder = RowEncoder()
json_dict = {
'rows': [row_encoder.default(x) for x in rows]
}
# get the response
response = requests.post(url, headers=headers, json=json_dict)
# 200 is the only successful code
if response.status_code != 200:
raise HTTPError(response, f'Post row request returned http error: {response.json()}')
def delete_rows(self, dataset_id, table_name, group_id=None):
"""
Deletes all rows from a table in a given dataset
https://msdn.microsoft.com/en-us/library/mt238041.aspx
:param dataset_id: The id of the dataset to delete the rows from
:param table_name: The name of the table to delete the rows from
:param group_id: The optional id of the group to delete the rows from
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}/' \
f'{self.tables_snippet}/{table_name}/{self.rows_snippet}'
# form the headers
headers = self.client.auth_header
# get the response
response = requests.delete(url, headers=headers)
# 200 is the only successful code
if response.status_code != 200:
raise HTTPError(response, f'Post row request returned http error: {response.json()}')
def get_dataset_parameters(self, dataset_id, group_id=None):
"""
Gets all parameters for a single dataset
https://msdn.microsoft.com/en-us/library/mt784653.aspx
:param dataset_id: The id of the dataset from which you want the parameters
:param group_id: The optional id of the group to get the dataset's parameters
:return: The dataset parameters returned by the API
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}/{self.parameters_snippet}'
# form the headers
headers = self.client.auth_header
# get the response
response = requests.get(url, headers=headers)
# 200 is the only successful code, raise an exception on any other response code
if response.status_code != 200:
raise HTTPError(response, f'Get Dataset parameters request returned http error: {response.json()}')
return json.loads(response.text)
def refresh_dataset(self, dataset_id, notify_option=None, group_id=None):
"""
Refreshes a single dataset
:param dataset_id: The id of the dataset to refresh
:param notify_option: The optional notify_option to add in the request body
:param group_id: The optional id of the group
"""
# group_id can be none, account for it
if group_id is None:
groups_part = '/'
else:
groups_part = f'/{self.groups_snippet}/{group_id}/'
# form the url
url = f'{self.base_url}{groups_part}/{self.datasets_snippet}/{dataset_id}/{self.refreshes_snippet}'
# form the headers
headers = self.client.auth_header
if notify_option is not None:
json_dict = {
'notifyOption': notify_option
}
else:
json_dict = None
# get the response
response = requests.post(url, headers=headers, json=json_dict)
# 200 is the only successful code, raise an exception on any other response code
if response.status_code != 202:
raise HTTPError(response, f'Refresh dataset request returned http error: {response.json()}')
@classmethod
def datasets_from_get_datasets_response(cls, response):
"""
Creates a list of datasets from a http response object
:param response: The http response object
:return: A list of datasets created from the given http response object
"""
# load the response into a dict
response_dict = json.loads(response.text)
datasets = []
# go through entries returned from API
for entry in response_dict[cls.get_datasets_value_key]:
datasets.append(Dataset.from_dict(entry))
return datasets
@classmethod
def tables_from_get_tables_response(cls, response):
"""
Creates a list of tables from a http response object
:param response: The http response object
:return: A list of tables created from the given http response object
"""
# load the response into a dict
response_dict = json.loads(response.text)
tables = []
# go through entries returned from API
for entry in response_dict[cls.get_datasets_value_key]:
tables.append(Table.from_dict(entry))
return tables
|
en
| 0.753336
|
# -*- coding: future_fstrings -*- # url snippets # json keys Evaluates the number of datasets :param group_id: The optional group id :return: The number of datasets as returned by the API Evaluates if the dataset exists :param dataset_id: The id of the dataset to evaluate :param group_id: The optional group id :return: True if the dataset exists, False otherwise Fetches all datasets https://msdn.microsoft.com/en-us/library/mt203567.aspx :param group_id: The optional group id to get datasets from :return: The list of the datasets found # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code, raise an exception on any other response code Gets a single dataset https://msdn.microsoft.com/en-us/library/mt784653.aspx :param dataset_id: The id of the dataset to get :param group_id: The optional id of the group to get the dataset from :return: The dataset returned by the API # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code, raise an exception on any other response code Posts a single dataset https://msdn.microsoft.com/en-us/library/mt203562.aspx :param dataset: The dataset to push :param group_id: The optional group id to push the dataset to :return: The pushed dataset as returned by the API # group_id can be none, account for it # form the url # form the headers # form the json dict # get the response # 201 - Created. The request was fulfilled and a new Dataset was created. 
Deletes a dataset :param dataset_id: The id of the dataset to delete :param group_id: The optional group id to delete the dataset from # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code Deletes all datasets :param group_id: The optional group id of the group to delete all datasets from # get all the datasets and delete each one Gets tables from a dataset https://msdn.microsoft.com/en-us/library/mt203556.aspx :param dataset_id: The id of the dataset which to get tables from :param group_id: The optional id of the group which to get tables from :return: A list of tables from the given group and dataset # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code, raise an exception on any other response code Posts rows to a table in a given dataset https://msdn.microsoft.com/en-us/library/mt203561.aspx :param dataset_id: The id of the dataset to post rows to :param table_name: The name of the table to post rows to :param rows: The rows to post to the table :param group_id: The optional id of the group to post rows to # group_id can be none, account for it # form the url # form the headers # form the json dict # get the response # 200 is the only successful code Deletes all rows from a table in a given dataset https://msdn.microsoft.com/en-us/library/mt238041.aspx :param dataset_id: The id of the dataset to delete the rows from :param table_name: The name of the table to delete the rows from :param group_id: The optional id of the group to delete the rows from # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code Gets all parameters for a single dataset https://msdn.microsoft.com/en-us/library/mt784653.aspx :param dataset_id: The id of the dataset from which you want the parameters :param group_id: The optional id of the group to get the dataset's parameters 
:return: The dataset parameters returned by the API # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code, raise an exception on any other response code Refreshes a single dataset :param dataset_id: The id of the dataset to refresh :param notify_option: The optional notify_option to add in the request body :param group_id: The optional id of the group # group_id can be none, account for it # form the url # form the headers # get the response # 200 is the only successful code, raise an exception on any other response code Creates a list of datasets from a http response object :param response: The http response object :return: A list of datasets created from the given http response object # load the response into a dict # go through entries returned from API Creates a list of tables from a http response object :param response: The http response object :return: A list of tables created from the given http response object # load the response into a dict # go through entries returned from API
| 2.713503
| 3
|
test_haystack/test_models.py
|
sgaist/django-haystack
| 1
|
6625855
|
<reponame>sgaist/django-haystack
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import logging as std_logging
import pickle
from django.test import TestCase
from test_haystack.core.models import MockModel
from haystack import connections
from haystack.models import SearchResult
from haystack.utils import log as logging
from haystack.utils.loading import UnifiedIndex
from .mocks import MockSearchResult
from .test_indexes import ReadQuerySetTestSearchIndex
class CaptureHandler(std_logging.Handler):
    """Logging handler that records every emitted record for inspection.

    Records accumulate on the class-level ``logs_seen`` list so tests can
    examine (and reset) them without holding a handler reference.
    """

    logs_seen = []

    def emit(self, record):
        # Append to the class attribute explicitly so all instances share
        # the same capture list.
        CaptureHandler.logs_seen.append(record)
class SearchResultTestCase(TestCase):
    """Tests for haystack's SearchResult: construction, extra/overwrite
    kwargs, stored-field resolution, graceful missing-object handling,
    read_queryset support and pickling."""
    fixtures = ['base_data']
    def setUp(self):
        """Install a log-capturing handler and build three SearchResults
        (no extra data / extra 'stored' data / data that must not
        overwrite core attributes)."""
        super(SearchResultTestCase, self).setUp()
        cap = CaptureHandler()
        logging.getLogger('haystack').addHandler(cap)
        self.no_data = {}
        self.extra_data = {
            'stored': 'I am stored data. How fun.',
        }
        self.no_overwrite_data = {
            'django_id': 2,
            'django_ct': 'haystack.anothermockmodel',
            'stored': 'I am stored data. How fun.',
        }
        # The str(1) bit might seem unnecessary but it avoids test_unicode needing to handle
        # the differences between repr() output on Python 2 and 3 for a unicode literal:
        self.no_data_sr = MockSearchResult('haystack', 'mockmodel', str(1), 2)
        self.extra_data_sr = MockSearchResult('haystack', 'mockmodel', str(1), 3, **self.extra_data)
        self.no_overwrite_data_sr = MockSearchResult('haystack', 'mockmodel', str(1), 4,
                                                     **self.no_overwrite_data)
    def test_init(self):
        """Constructor populates model metadata, pk, score and kwargs."""
        self.assertEqual(self.no_data_sr.app_label, 'haystack')
        self.assertEqual(self.no_data_sr.model_name, 'mockmodel')
        self.assertEqual(self.no_data_sr.model, MockModel)
        self.assertEqual(self.no_data_sr.verbose_name, u'Mock model')
        self.assertEqual(self.no_data_sr.verbose_name_plural, u'Mock models')
        self.assertEqual(self.no_data_sr.pk, '1')
        self.assertEqual(self.no_data_sr.score, 2)
        self.assertEqual(self.no_data_sr.stored, None)
        self.assertEqual(self.extra_data_sr.app_label, 'haystack')
        self.assertEqual(self.extra_data_sr.model_name, 'mockmodel')
        self.assertEqual(self.extra_data_sr.model, MockModel)
        self.assertEqual(self.extra_data_sr.verbose_name, u'Mock model')
        self.assertEqual(self.extra_data_sr.verbose_name_plural, u'Mock models')
        self.assertEqual(self.extra_data_sr.pk, '1')
        self.assertEqual(self.extra_data_sr.score, 3)
        self.assertEqual(self.extra_data_sr.stored, 'I am stored data. How fun.')
        self.assertEqual(self.no_overwrite_data_sr.app_label, 'haystack')
        self.assertEqual(self.no_overwrite_data_sr.model_name, 'mockmodel')
        self.assertEqual(self.no_overwrite_data_sr.model, MockModel)
        self.assertEqual(self.no_overwrite_data_sr.verbose_name, u'Mock model')
        self.assertEqual(self.no_overwrite_data_sr.verbose_name_plural, u'Mock models')
        self.assertEqual(self.no_overwrite_data_sr.pk, '1')
        self.assertEqual(self.no_overwrite_data_sr.score, 4)
        self.assertEqual(self.no_overwrite_data_sr.stored, 'I am stored data. How fun.')
    def test_get_additional_fields(self):
        """get_additional_fields returns exactly the extra kwargs given at init."""
        self.assertEqual(self.no_data_sr.get_additional_fields(), {})
        self.assertEqual(self.extra_data_sr.get_additional_fields(), {'stored': 'I am stored data. How fun.'})
        self.assertEqual(self.no_overwrite_data_sr.get_additional_fields(),
                         {'django_ct': 'haystack.anothermockmodel',
                          'django_id': 2,
                          'stored': 'I am stored data. How fun.'})
    def test_unicode(self):
        """__unicode__ renders the app_label.model_name plus pk."""
        self.assertEqual(self.no_data_sr.__unicode__(), u"<SearchResult: haystack.mockmodel (pk='1')>")
        self.assertEqual(self.extra_data_sr.__unicode__(), u"<SearchResult: haystack.mockmodel (pk='1')>")
        self.assertEqual(self.no_overwrite_data_sr.__unicode__(),
                         u"<SearchResult: haystack.mockmodel (pk='1')>")
    def test_content_type(self):
        """content_type() reflects the resolved model, not the init args."""
        self.assertEqual(self.no_data_sr.content_type(), u'core.mockmodel')
        self.assertEqual(self.extra_data_sr.content_type(), u'core.mockmodel')
        self.assertEqual(self.no_overwrite_data_sr.content_type(), u'core.mockmodel')
    def test_stored_fields(self):
        """get_stored_fields is empty without a registered index and
        resolves values once a matching index is registered."""
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui
        # Without registering, we should receive an empty dict.
        self.assertEqual(self.no_data_sr.get_stored_fields(), {})
        self.assertEqual(self.extra_data_sr.get_stored_fields(), {})
        self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(), {})
        from haystack import indexes
        class TestSearchIndex(indexes.SearchIndex, indexes.Indexable):
            stored = indexes.CharField(model_attr='author', document=True)
            def get_model(self):
                return MockModel
        # Include the index & try again.
        ui.document_field = 'stored'
        ui.build(indexes=[TestSearchIndex()])
        self.assertEqual(self.no_data_sr.get_stored_fields(), {'stored': None})
        self.assertEqual(self.extra_data_sr.get_stored_fields(), {'stored': 'I am stored data. How fun.'})
        self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(),
                         {'stored': 'I am stored data. How fun.'})
        # Restore.
        connections['default']._index = old_unified_index
    def test_missing_object(self):
        """Lookups for nonexistent pks / unknown models degrade gracefully
        (None attributes) and emit the expected number of log records."""
        awol1 = SearchResult('core', 'mockmodel', '1000000', 2)
        self.assertEqual(awol1.app_label, 'core')
        self.assertEqual(awol1.model_name, 'mockmodel')
        self.assertEqual(awol1.pk, '1000000')
        self.assertEqual(awol1.score, 2)
        awol2 = SearchResult('core', 'yetanothermockmodel', '1000000', 2)
        self.assertEqual(awol2.app_label, 'core')
        self.assertEqual(awol2.model_name, 'yetanothermockmodel')
        self.assertEqual(awol2.pk, '1000000')
        self.assertEqual(awol2.score, 2)
        # Failed lookups should fail gracefully.
        CaptureHandler.logs_seen = []
        self.assertEqual(awol1.model, MockModel)
        self.assertEqual(awol1.object, None)
        self.assertEqual(awol1.verbose_name, u'Mock model')
        self.assertEqual(awol1.verbose_name_plural, u'Mock models')
        self.assertEqual(awol1.stored, None)
        self.assertEqual(len(CaptureHandler.logs_seen), 4)
        CaptureHandler.logs_seen = []
        self.assertEqual(awol2.model, None)
        self.assertEqual(awol2.object, None)
        self.assertEqual(awol2.verbose_name, u'')
        self.assertEqual(awol2.verbose_name_plural, u'')
        self.assertEqual(awol2.stored, None)
        self.assertEqual(len(CaptureHandler.logs_seen), 12)
    def test_read_queryset(self):
        """An index's read_queryset (soft-delete manager) is used to fetch
        objects the default manager would hide."""
        # The model is flagged deleted so not returned by the default manager.
        deleted1 = SearchResult('core', 'afifthmockmodel', 2, 2)
        self.assertEqual(deleted1.object, None)
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        ui.document_field = 'author'
        ui.build(indexes=[ReadQuerySetTestSearchIndex()])
        connections['default']._index = ui
        # The soft delete manager returns the object.
        deleted2 = SearchResult('core', 'afifthmockmodel', 2, 2)
        self.assertNotEqual(deleted2.object, None)
        self.assertEqual(deleted2.object.author, 'sam2')
        # Restore.
        connections['default']._index = old_unified_index
    def test_pickling(self):
        """SearchResult survives a pickle round-trip with fields intact."""
        pickle_me_1 = SearchResult('core', 'mockmodel', '1000000', 2)
        picklicious = pickle.dumps(pickle_me_1)
        pickle_me_2 = pickle.loads(picklicious)
        self.assertEqual(pickle_me_1.app_label, pickle_me_2.app_label)
        self.assertEqual(pickle_me_1.model_name, pickle_me_2.model_name)
        self.assertEqual(pickle_me_1.pk, pickle_me_2.pk)
        self.assertEqual(pickle_me_1.score, pickle_me_2.score)
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import logging as std_logging
import pickle
from django.test import TestCase
from test_haystack.core.models import MockModel
from haystack import connections
from haystack.models import SearchResult
from haystack.utils import log as logging
from haystack.utils.loading import UnifiedIndex
from .mocks import MockSearchResult
from .test_indexes import ReadQuerySetTestSearchIndex
class CaptureHandler(std_logging.Handler):
    """Logging handler that records every emitted record for inspection.

    Records accumulate on the class-level ``logs_seen`` list so tests can
    examine (and reset) them without holding a handler reference.
    """

    logs_seen = []

    def emit(self, record):
        # Append to the class attribute explicitly so all instances share
        # the same capture list.
        CaptureHandler.logs_seen.append(record)
class SearchResultTestCase(TestCase):
    """Tests for haystack's SearchResult: construction, extra/overwrite
    kwargs, stored-field resolution, graceful missing-object handling,
    read_queryset support and pickling."""
    fixtures = ['base_data']
    def setUp(self):
        """Install a log-capturing handler and build three SearchResults
        (no extra data / extra 'stored' data / data that must not
        overwrite core attributes)."""
        super(SearchResultTestCase, self).setUp()
        cap = CaptureHandler()
        logging.getLogger('haystack').addHandler(cap)
        self.no_data = {}
        self.extra_data = {
            'stored': 'I am stored data. How fun.',
        }
        self.no_overwrite_data = {
            'django_id': 2,
            'django_ct': 'haystack.anothermockmodel',
            'stored': 'I am stored data. How fun.',
        }
        # The str(1) bit might seem unnecessary but it avoids test_unicode needing to handle
        # the differences between repr() output on Python 2 and 3 for a unicode literal:
        self.no_data_sr = MockSearchResult('haystack', 'mockmodel', str(1), 2)
        self.extra_data_sr = MockSearchResult('haystack', 'mockmodel', str(1), 3, **self.extra_data)
        self.no_overwrite_data_sr = MockSearchResult('haystack', 'mockmodel', str(1), 4,
                                                     **self.no_overwrite_data)
    def test_init(self):
        """Constructor populates model metadata, pk, score and kwargs."""
        self.assertEqual(self.no_data_sr.app_label, 'haystack')
        self.assertEqual(self.no_data_sr.model_name, 'mockmodel')
        self.assertEqual(self.no_data_sr.model, MockModel)
        self.assertEqual(self.no_data_sr.verbose_name, u'Mock model')
        self.assertEqual(self.no_data_sr.verbose_name_plural, u'Mock models')
        self.assertEqual(self.no_data_sr.pk, '1')
        self.assertEqual(self.no_data_sr.score, 2)
        self.assertEqual(self.no_data_sr.stored, None)
        self.assertEqual(self.extra_data_sr.app_label, 'haystack')
        self.assertEqual(self.extra_data_sr.model_name, 'mockmodel')
        self.assertEqual(self.extra_data_sr.model, MockModel)
        self.assertEqual(self.extra_data_sr.verbose_name, u'Mock model')
        self.assertEqual(self.extra_data_sr.verbose_name_plural, u'Mock models')
        self.assertEqual(self.extra_data_sr.pk, '1')
        self.assertEqual(self.extra_data_sr.score, 3)
        self.assertEqual(self.extra_data_sr.stored, 'I am stored data. How fun.')
        self.assertEqual(self.no_overwrite_data_sr.app_label, 'haystack')
        self.assertEqual(self.no_overwrite_data_sr.model_name, 'mockmodel')
        self.assertEqual(self.no_overwrite_data_sr.model, MockModel)
        self.assertEqual(self.no_overwrite_data_sr.verbose_name, u'Mock model')
        self.assertEqual(self.no_overwrite_data_sr.verbose_name_plural, u'Mock models')
        self.assertEqual(self.no_overwrite_data_sr.pk, '1')
        self.assertEqual(self.no_overwrite_data_sr.score, 4)
        self.assertEqual(self.no_overwrite_data_sr.stored, 'I am stored data. How fun.')
    def test_get_additional_fields(self):
        """get_additional_fields returns exactly the extra kwargs given at init."""
        self.assertEqual(self.no_data_sr.get_additional_fields(), {})
        self.assertEqual(self.extra_data_sr.get_additional_fields(), {'stored': 'I am stored data. How fun.'})
        self.assertEqual(self.no_overwrite_data_sr.get_additional_fields(),
                         {'django_ct': 'haystack.anothermockmodel',
                          'django_id': 2,
                          'stored': 'I am stored data. How fun.'})
    def test_unicode(self):
        """__unicode__ renders the app_label.model_name plus pk."""
        self.assertEqual(self.no_data_sr.__unicode__(), u"<SearchResult: haystack.mockmodel (pk='1')>")
        self.assertEqual(self.extra_data_sr.__unicode__(), u"<SearchResult: haystack.mockmodel (pk='1')>")
        self.assertEqual(self.no_overwrite_data_sr.__unicode__(),
                         u"<SearchResult: haystack.mockmodel (pk='1')>")
    def test_content_type(self):
        """content_type() reflects the resolved model, not the init args."""
        self.assertEqual(self.no_data_sr.content_type(), u'core.mockmodel')
        self.assertEqual(self.extra_data_sr.content_type(), u'core.mockmodel')
        self.assertEqual(self.no_overwrite_data_sr.content_type(), u'core.mockmodel')
    def test_stored_fields(self):
        """get_stored_fields is empty without a registered index and
        resolves values once a matching index is registered."""
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        ui.build(indexes=[])
        connections['default']._index = ui
        # Without registering, we should receive an empty dict.
        self.assertEqual(self.no_data_sr.get_stored_fields(), {})
        self.assertEqual(self.extra_data_sr.get_stored_fields(), {})
        self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(), {})
        from haystack import indexes
        class TestSearchIndex(indexes.SearchIndex, indexes.Indexable):
            stored = indexes.CharField(model_attr='author', document=True)
            def get_model(self):
                return MockModel
        # Include the index & try again.
        ui.document_field = 'stored'
        ui.build(indexes=[TestSearchIndex()])
        self.assertEqual(self.no_data_sr.get_stored_fields(), {'stored': None})
        self.assertEqual(self.extra_data_sr.get_stored_fields(), {'stored': 'I am stored data. How fun.'})
        self.assertEqual(self.no_overwrite_data_sr.get_stored_fields(),
                         {'stored': 'I am stored data. How fun.'})
        # Restore.
        connections['default']._index = old_unified_index
    def test_missing_object(self):
        """Lookups for nonexistent pks / unknown models degrade gracefully
        (None attributes) and emit the expected number of log records."""
        awol1 = SearchResult('core', 'mockmodel', '1000000', 2)
        self.assertEqual(awol1.app_label, 'core')
        self.assertEqual(awol1.model_name, 'mockmodel')
        self.assertEqual(awol1.pk, '1000000')
        self.assertEqual(awol1.score, 2)
        awol2 = SearchResult('core', 'yetanothermockmodel', '1000000', 2)
        self.assertEqual(awol2.app_label, 'core')
        self.assertEqual(awol2.model_name, 'yetanothermockmodel')
        self.assertEqual(awol2.pk, '1000000')
        self.assertEqual(awol2.score, 2)
        # Failed lookups should fail gracefully.
        CaptureHandler.logs_seen = []
        self.assertEqual(awol1.model, MockModel)
        self.assertEqual(awol1.object, None)
        self.assertEqual(awol1.verbose_name, u'Mock model')
        self.assertEqual(awol1.verbose_name_plural, u'Mock models')
        self.assertEqual(awol1.stored, None)
        self.assertEqual(len(CaptureHandler.logs_seen), 4)
        CaptureHandler.logs_seen = []
        self.assertEqual(awol2.model, None)
        self.assertEqual(awol2.object, None)
        self.assertEqual(awol2.verbose_name, u'')
        self.assertEqual(awol2.verbose_name_plural, u'')
        self.assertEqual(awol2.stored, None)
        self.assertEqual(len(CaptureHandler.logs_seen), 12)
    def test_read_queryset(self):
        """An index's read_queryset (soft-delete manager) is used to fetch
        objects the default manager would hide."""
        # The model is flagged deleted so not returned by the default manager.
        deleted1 = SearchResult('core', 'afifthmockmodel', 2, 2)
        self.assertEqual(deleted1.object, None)
        # Stow.
        old_unified_index = connections['default']._index
        ui = UnifiedIndex()
        ui.document_field = 'author'
        ui.build(indexes=[ReadQuerySetTestSearchIndex()])
        connections['default']._index = ui
        # The soft delete manager returns the object.
        deleted2 = SearchResult('core', 'afifthmockmodel', 2, 2)
        self.assertNotEqual(deleted2.object, None)
        self.assertEqual(deleted2.object.author, 'sam2')
        # Restore.
        connections['default']._index = old_unified_index
    def test_pickling(self):
        """SearchResult survives a pickle round-trip with fields intact."""
        pickle_me_1 = SearchResult('core', 'mockmodel', '1000000', 2)
        picklicious = pickle.dumps(pickle_me_1)
        pickle_me_2 = pickle.loads(picklicious)
        self.assertEqual(pickle_me_1.app_label, pickle_me_2.app_label)
        self.assertEqual(pickle_me_1.model_name, pickle_me_2.model_name)
        self.assertEqual(pickle_me_1.pk, pickle_me_2.pk)
        self.assertEqual(pickle_me_1.score, pickle_me_2.score)
|
en
| 0.894365
|
# encoding: utf-8 # The str(1) bit might seem unnecessary but it avoids test_unicode needing to handle # the differences between repr() output on Python 2 and 3 for a unicode literal: # Stow. # Without registering, we should receive an empty dict. # Include the index & try again. # Restore. # Failed lookups should fail gracefully. # The model is flagged deleted so not returned by the default manager. # Stow. # The soft delete manager returns the object. # Restore.
| 2.320584
| 2
|
setup.py
|
Aiman-Shaa/PyPi-pakage
| 0
|
6625856
|
import setuptools

# Distribution metadata for the xmath package.
setuptools.setup(
    name='xmath',
    version='0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    description='xmath module contains many simple and advanced mathematical operations',
    packages=setuptools.find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
|
import setuptools

# Distribution metadata for the xmath package.
setuptools.setup(
    name='xmath',
    version='0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    description='xmath module contains many simple and advanced mathematical operations',
    packages=setuptools.find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
)
|
none
| 1
| 1.311812
| 1
|
|
backend/breach/tests/test_views.py
|
nkrios/rupture
| 184
|
6625857
|
<filename>backend/breach/tests/test_views.py
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target, Victim, Round, SampleSet
import json
from binascii import hexlify
from mock import patch
class ViewsTestCase(TestCase):
def setUp(self):
    """Create the test HTTP client and two Target fixtures, one per
    attack method, keeping the raw field dicts for response comparison."""
    self.client = Client()

    # Fields shared by both targets; only name and method differ.
    common_fields = {
        'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
        'prefix': 'imper',
        'alphabet': 'abcdefghijklmnopqrstuvwxyz',
        'secretlength': 9,
        'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
        'recordscardinality': 1,
    }
    self.target1_data = dict(common_fields, name='ruptureit', method=1)
    self.target2_data = dict(common_fields, name='ruptureit2', method=2)
    # Persist the fixtures with exactly the same field values.
    self.target1 = Target.objects.create(**self.target1_data)
    self.target2 = Target.objects.create(**self.target2_data)
def test_target_post(self):
    """
    Test post requests for /target
    """
    # Same fields as the first fixture, but with a fresh name.
    payload = {
        'name': 'ruptureit3',
        'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
        'prefix': 'imper',
        'alphabet': 'abcdefghijklmnopqrstuvwxyz',
        'secretlength': 9,
        'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
        'recordscardinality': 1,
        'method': 1
    }
    response = self.client.post(reverse('TargetView'), json.dumps(payload), content_type='application/json')
    self.assertEqual(response.status_code, 200)
    body = json.loads(response.content)
    self.assertEqual(body['target_name'], 'ruptureit3')
def test_target_get(self):
response = self.client.get(reverse('TargetView'))
response_dict1 = {key: json.loads(response.content)['targets'][0][key] for key in self.target1_data}
response_dict2 = {key: json.loads(response.content)['targets'][1][key] for key in self.target2_data}
self.assertEqual(response.status_code, 200)
self.assertEqual(response_dict1, self.target1_data)
self.assertEqual(response_dict2, self.target2_data)
def test_victim_post(self):
"""
Test post requests for /victim
"""
# Create the request
data = {
'sourceip': '192.168.1.5',
}
response = self.client.post(reverse('VictimListView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victim_id'], 1)
def test_victim_get(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1
)
round_data = {
'victim': victim,
'index': 1,
'amount': self.target1.samplesize,
'knownalphabet': 'abcdefghijklmnopqrstuvxyz',
'knownsecret': 'imper'
}
new_round = Round(**round_data)
new_round.save()
response = self.client.get(reverse('VictimListView'))
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victims'][0]['sourceip'], '192.168.1.5')
@patch('breach.models.Victim.attack')
def test_attack_post_noID(self, attack):
"""
Test post requests for /victim
"""
# Create the request
data = {
'sourceip': '192.168.1.6',
'target': self.target1.name
}
response = self.client.post(reverse('AttackView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victim_id'], 1)
@patch('breach.models.Victim.attack')
def test_attack_post_ID(self, attack):
"""
Test post requests for /victim
"""
victim = Victim.objects.create(
sourceip='192.168.1.5'
)
# Create the request
data = {
'id': victim.id,
'target': self.target1.name
}
response = self.client.post(reverse('AttackView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victim_id'], victim.id)
def test_victimID_get(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1
)
Victim.objects.create(
sourceip='192.168.1.6',
target=self.target2
)
round_data = {
'victim': victim,
'index': 1,
'amount': victim.target.samplesize,
'knownalphabet': 'abcdefghijklmnopqrstuvxyz',
'knownsecret': '<PASSWORD>'
}
new_round = Round(**round_data)
new_round.save()
sampleset1_data = {
'round': new_round,
'candidatealphabet': 'a',
'datalength': len(hexlify('length')),
'success': True,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVXYZ'
}
sampleset = SampleSet(**sampleset1_data)
sampleset.save()
sampleset2_data = {
'round': new_round,
'candidatealphabet': 'b',
'datalength': len(hexlify('length2')),
'success': True,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVXYZ'
}
sampleset2 = SampleSet(**sampleset2_data)
sampleset2.save()
response = self.client.get(reverse('VictimDetailView', kwargs={'victim_id': victim.id}))
self.assertEqual(json.loads(response.content)['victim_ip'], '192.168.1.5')
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit')
self.assertEqual(json.loads(response.content)['attack_details'][0]['batch'], 0)
def test_victimID_patch_state(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1,
)
data1 = {'state': 'paused'}
data2 = {'state': 'running'}
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data1), content_type='application/json', )
self.assertEqual(response.status_code, 200)
paused_victim = Victim.objects.get(pk=victim.id)
self.assertEqual(paused_victim.state, 'paused')
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data2), content_type='application/json', )
restarted_victim = Victim.objects.get(pk=victim.id)
self.assertEqual(restarted_victim.state, 'running')
def test_victimID_patch_delete(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1,
)
data1 = {'deleted': True}
data2 = {'deleted': False}
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data1), content_type='application/json', )
self.assertEqual(response.status_code, 200)
deleted_victim = Victim.objects.get(pk=victim.id)
self.assertNotEqual(deleted_victim.trashed_at, None)
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data2), content_type='application/json', )
restored_victim = Victim.objects.get(pk=victim.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(restored_victim.trashed_at, None)
@patch('breach.helpers.network.scan_network')
def test_victim_notstarted(self, scan_network):
response = self.client.get(reverse('DiscoveredVictimsView'))
self.assertEqual(response.status_code, 200)
|
<filename>backend/breach/tests/test_views.py
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target, Victim, Round, SampleSet
import json
from binascii import hexlify
from mock import patch
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
def test_target_post(self):
"""
Test post requests for /target
"""
# Create the request
data = {
'name': 'ruptureit3',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
def test_target_get(self):
response = self.client.get(reverse('TargetView'))
response_dict1 = {key: json.loads(response.content)['targets'][0][key] for key in self.target1_data}
response_dict2 = {key: json.loads(response.content)['targets'][1][key] for key in self.target2_data}
self.assertEqual(response.status_code, 200)
self.assertEqual(response_dict1, self.target1_data)
self.assertEqual(response_dict2, self.target2_data)
def test_victim_post(self):
"""
Test post requests for /victim
"""
# Create the request
data = {
'sourceip': '192.168.1.5',
}
response = self.client.post(reverse('VictimListView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victim_id'], 1)
def test_victim_get(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1
)
round_data = {
'victim': victim,
'index': 1,
'amount': self.target1.samplesize,
'knownalphabet': 'abcdefghijklmnopqrstuvxyz',
'knownsecret': 'imper'
}
new_round = Round(**round_data)
new_round.save()
response = self.client.get(reverse('VictimListView'))
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victims'][0]['sourceip'], '192.168.1.5')
@patch('breach.models.Victim.attack')
def test_attack_post_noID(self, attack):
"""
Test post requests for /victim
"""
# Create the request
data = {
'sourceip': '192.168.1.6',
'target': self.target1.name
}
response = self.client.post(reverse('AttackView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victim_id'], 1)
@patch('breach.models.Victim.attack')
def test_attack_post_ID(self, attack):
"""
Test post requests for /victim
"""
victim = Victim.objects.create(
sourceip='192.168.1.5'
)
# Create the request
data = {
'id': victim.id,
'target': self.target1.name
}
response = self.client.post(reverse('AttackView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['victim_id'], victim.id)
def test_victimID_get(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1
)
Victim.objects.create(
sourceip='192.168.1.6',
target=self.target2
)
round_data = {
'victim': victim,
'index': 1,
'amount': victim.target.samplesize,
'knownalphabet': 'abcdefghijklmnopqrstuvxyz',
'knownsecret': '<PASSWORD>'
}
new_round = Round(**round_data)
new_round.save()
sampleset1_data = {
'round': new_round,
'candidatealphabet': 'a',
'datalength': len(hexlify('length')),
'success': True,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVXYZ'
}
sampleset = SampleSet(**sampleset1_data)
sampleset.save()
sampleset2_data = {
'round': new_round,
'candidatealphabet': 'b',
'datalength': len(hexlify('length2')),
'success': True,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVXYZ'
}
sampleset2 = SampleSet(**sampleset2_data)
sampleset2.save()
response = self.client.get(reverse('VictimDetailView', kwargs={'victim_id': victim.id}))
self.assertEqual(json.loads(response.content)['victim_ip'], '192.168.1.5')
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit')
self.assertEqual(json.loads(response.content)['attack_details'][0]['batch'], 0)
def test_victimID_patch_state(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1,
)
data1 = {'state': 'paused'}
data2 = {'state': 'running'}
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data1), content_type='application/json', )
self.assertEqual(response.status_code, 200)
paused_victim = Victim.objects.get(pk=victim.id)
self.assertEqual(paused_victim.state, 'paused')
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data2), content_type='application/json', )
restarted_victim = Victim.objects.get(pk=victim.id)
self.assertEqual(restarted_victim.state, 'running')
def test_victimID_patch_delete(self):
victim = Victim.objects.create(
sourceip='192.168.1.5',
target=self.target1,
)
data1 = {'deleted': True}
data2 = {'deleted': False}
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data1), content_type='application/json', )
self.assertEqual(response.status_code, 200)
deleted_victim = Victim.objects.get(pk=victim.id)
self.assertNotEqual(deleted_victim.trashed_at, None)
response = self.client.patch(reverse('VictimDetailView', kwargs={'victim_id': victim.id}), json.dumps(data2), content_type='application/json', )
restored_victim = Victim.objects.get(pk=victim.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(restored_victim.trashed_at, None)
@patch('breach.helpers.network.scan_network')
def test_victim_notstarted(self, scan_network):
response = self.client.get(reverse('DiscoveredVictimsView'))
self.assertEqual(response.status_code, 200)
|
en
| 0.720035
|
Test post requests for /target # Create the request Test post requests for /victim # Create the request Test post requests for /victim # Create the request Test post requests for /victim # Create the request
| 2.297182
| 2
|
metrics.py
|
helloholmes/dog_detection_gluoncv
| 2
|
6625858
|
<reponame>helloholmes/dog_detection_gluoncv<filename>metrics.py
# coding:utf-8
'''
python 3.5
mxnet 1.3.0
gluoncv 0.3.0
visdom 0.1.7
gluonbook 0.6.9
auther: helloholmes
'''
import mxnet as mx
import numpy as np
import os
import time
import pickle
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet import autograd
from mxnet.gluon import nn
class RPNAccMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNAccMetric, self).__init__('RPNAcc')
def update(self, labels, preds):
rpn_label, rpn_weight = labels
rpn_cls_logits = preds[0]
num_inst = nd.sum(rpn_weight)
pred_label = nd.sigmoid(rpn_cls_logits) >= 0.5
num_acc = mx.nd.sum((pred_label == rpn_label) * rpn_weight)
self.sum_metric += num_acc.asscalar()
self.num_inst += num_inst.asscalar()
class RPNL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNL1LossMetric, self).__init__('RPNL1Loss')
def update(self, labels, preds):
rpn_bbox_target, rpn_bbox_weight = labels
rpn_bbox_reg = preds[0]
num_inst = nd.sum(rpn_bbox_weight) / 4
loss = nd.sum(rpn_bbox_weight * nd.smooth_l1(rpn_bbox_reg-rpn_bbox_target, scalar=3))
self.sum_metric += loss.asscalar()
self.num_inst += num_inst.asscalar()
class RCNNAccMetric(mx.metric.EvalMetric):
def __init__(self):
super(RCNNAccMetric, self).__init__('RCNNAcc')
def update(self, labels, preds):
rcnn_label = labels[0]
rcnn_cls = preds[0]
pred_label = nd.argmax(rcnn_cls, axis=-1)
num_acc = nd.sum(pred_label == rcnn_label)
self.sum_metric += num_acc.asscalar()
self.num_inst += rcnn_label.size
class RCNNL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')
def updata(self, labels, preds):
rcnn_bbox_taregt, rcnn_bbox_weight = labels
rcnn_bbox_reg = preds[0]
num_inst = nd.sum(rcnn_bbox_weight) / 4
loss = nd.sum(rcnn_bbox_weight * nd.smooth_l1(rcnn_bbox_reg-rcnn_bbox_taregt, scalar=1))
self.sum_metric += loss.asscalar()
self.num_inst += num_inst.asscalar()
|
# coding:utf-8
'''
python 3.5
mxnet 1.3.0
gluoncv 0.3.0
visdom 0.1.7
gluonbook 0.6.9
auther: helloholmes
'''
import mxnet as mx
import numpy as np
import os
import time
import pickle
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet import autograd
from mxnet.gluon import nn
class RPNAccMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNAccMetric, self).__init__('RPNAcc')
def update(self, labels, preds):
rpn_label, rpn_weight = labels
rpn_cls_logits = preds[0]
num_inst = nd.sum(rpn_weight)
pred_label = nd.sigmoid(rpn_cls_logits) >= 0.5
num_acc = mx.nd.sum((pred_label == rpn_label) * rpn_weight)
self.sum_metric += num_acc.asscalar()
self.num_inst += num_inst.asscalar()
class RPNL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNL1LossMetric, self).__init__('RPNL1Loss')
def update(self, labels, preds):
rpn_bbox_target, rpn_bbox_weight = labels
rpn_bbox_reg = preds[0]
num_inst = nd.sum(rpn_bbox_weight) / 4
loss = nd.sum(rpn_bbox_weight * nd.smooth_l1(rpn_bbox_reg-rpn_bbox_target, scalar=3))
self.sum_metric += loss.asscalar()
self.num_inst += num_inst.asscalar()
class RCNNAccMetric(mx.metric.EvalMetric):
def __init__(self):
super(RCNNAccMetric, self).__init__('RCNNAcc')
def update(self, labels, preds):
rcnn_label = labels[0]
rcnn_cls = preds[0]
pred_label = nd.argmax(rcnn_cls, axis=-1)
num_acc = nd.sum(pred_label == rcnn_label)
self.sum_metric += num_acc.asscalar()
self.num_inst += rcnn_label.size
class RCNNL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')
def updata(self, labels, preds):
rcnn_bbox_taregt, rcnn_bbox_weight = labels
rcnn_bbox_reg = preds[0]
num_inst = nd.sum(rcnn_bbox_weight) / 4
loss = nd.sum(rcnn_bbox_weight * nd.smooth_l1(rcnn_bbox_reg-rcnn_bbox_taregt, scalar=1))
self.sum_metric += loss.asscalar()
self.num_inst += num_inst.asscalar()
|
en
| 0.307443
|
# coding:utf-8 python 3.5 mxnet 1.3.0 gluoncv 0.3.0 visdom 0.1.7 gluonbook 0.6.9 auther: helloholmes
| 2.03533
| 2
|
app/modules/encounters/schemas.py
|
karenc/houston
| 0
|
6625859
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Serialization schemas for Encounters resources RESTful API
----------------------------------------------------
"""
from flask_marshmallow import base_fields
from flask_restx_patched import ModelSchema
from .models import Encounter
class BaseEncounterSchema(ModelSchema):
"""
Base Encounter schema exposes only the most general fields.
"""
annotations = base_fields.Nested('BaseAnnotationSchema', many=True)
submitter = base_fields.Nested('PublicUserSchema', many=False)
owner = base_fields.Nested('PublicUserSchema', many=False)
hasView = base_fields.Function(Encounter.current_user_has_view_permission)
hasEdit = base_fields.Function(Encounter.current_user_has_edit_permission)
class Meta:
# pylint: disable=missing-docstring
model = Encounter
fields = (Encounter.guid.key,)
dump_only = (Encounter.guid.key,)
class DetailedEncounterSchema(BaseEncounterSchema):
"""
Detailed Encounter schema exposes all useful fields.
"""
class Meta(BaseEncounterSchema.Meta):
fields = BaseEncounterSchema.Meta.fields + (
Encounter.created.key,
Encounter.updated.key,
Encounter.owner_guid.key,
Encounter.public.key,
Encounter.annotations.key,
Encounter.owner.key,
Encounter.submitter.key,
)
dump_only = BaseEncounterSchema.Meta.dump_only + (
Encounter.created.key,
Encounter.updated.key,
)
class AugmentedEdmEncounterSchema(BaseEncounterSchema):
annotations = base_fields.Nested(
'BaseAnnotationSchema', many=True, only=('guid', 'asset_guid', 'ia_class')
)
createdHouston = base_fields.DateTime(attribute='created')
updatedHouston = base_fields.DateTime(attribute='updated')
class Meta(BaseEncounterSchema.Meta):
fields = BaseEncounterSchema.Meta.fields + (
'createdHouston',
'updatedHouston',
Encounter.owner.key,
Encounter.submitter.key,
'hasView',
'hasEdit',
'annotations',
)
|
# -*- coding: utf-8 -*-
"""
Serialization schemas for Encounters resources RESTful API
----------------------------------------------------
"""
from flask_marshmallow import base_fields
from flask_restx_patched import ModelSchema
from .models import Encounter
class BaseEncounterSchema(ModelSchema):
"""
Base Encounter schema exposes only the most general fields.
"""
annotations = base_fields.Nested('BaseAnnotationSchema', many=True)
submitter = base_fields.Nested('PublicUserSchema', many=False)
owner = base_fields.Nested('PublicUserSchema', many=False)
hasView = base_fields.Function(Encounter.current_user_has_view_permission)
hasEdit = base_fields.Function(Encounter.current_user_has_edit_permission)
class Meta:
# pylint: disable=missing-docstring
model = Encounter
fields = (Encounter.guid.key,)
dump_only = (Encounter.guid.key,)
class DetailedEncounterSchema(BaseEncounterSchema):
"""
Detailed Encounter schema exposes all useful fields.
"""
class Meta(BaseEncounterSchema.Meta):
fields = BaseEncounterSchema.Meta.fields + (
Encounter.created.key,
Encounter.updated.key,
Encounter.owner_guid.key,
Encounter.public.key,
Encounter.annotations.key,
Encounter.owner.key,
Encounter.submitter.key,
)
dump_only = BaseEncounterSchema.Meta.dump_only + (
Encounter.created.key,
Encounter.updated.key,
)
class AugmentedEdmEncounterSchema(BaseEncounterSchema):
annotations = base_fields.Nested(
'BaseAnnotationSchema', many=True, only=('guid', 'asset_guid', 'ia_class')
)
createdHouston = base_fields.DateTime(attribute='created')
updatedHouston = base_fields.DateTime(attribute='updated')
class Meta(BaseEncounterSchema.Meta):
fields = BaseEncounterSchema.Meta.fields + (
'createdHouston',
'updatedHouston',
Encounter.owner.key,
Encounter.submitter.key,
'hasView',
'hasEdit',
'annotations',
)
|
en
| 0.618408
|
# -*- coding: utf-8 -*- Serialization schemas for Encounters resources RESTful API ---------------------------------------------------- Base Encounter schema exposes only the most general fields. # pylint: disable=missing-docstring Detailed Encounter schema exposes all useful fields.
| 2.13557
| 2
|
antareslauncher/use_cases/check_remote_queue/slurm_queue_show.py
|
AntaresSimulatorTeam/antares-launcher
| 0
|
6625860
|
from dataclasses import dataclass
from antareslauncher.display.idisplay import IDisplay
from antareslauncher.remote_environnement.iremote_environment import (
IRemoteEnvironment,
)
@dataclass
class SlurmQueueShow:
env: IRemoteEnvironment
display: IDisplay
def run(self):
"""Displays all the jobs un the slurm queue"""
message = "Checking remote server queue\n" + self.env.get_queue_info()
self.display.show_message(message, __name__ + "." + __class__.__name__)
|
from dataclasses import dataclass
from antareslauncher.display.idisplay import IDisplay
from antareslauncher.remote_environnement.iremote_environment import (
IRemoteEnvironment,
)
@dataclass
class SlurmQueueShow:
env: IRemoteEnvironment
display: IDisplay
def run(self):
"""Displays all the jobs un the slurm queue"""
message = "Checking remote server queue\n" + self.env.get_queue_info()
self.display.show_message(message, __name__ + "." + __class__.__name__)
|
en
| 0.247984
|
Displays all the jobs un the slurm queue
| 2.260015
| 2
|
tests/test_misc.py
|
vkleen/skidl
| 700
|
6625861
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
import pytest
from skidl import Net, Part
from .setup_teardown import setup_function, teardown_function
def test_string_indices_1():
vreg1 = Part("xess.lib", "1117", footprint="null")
gnd = Net("GND")
vin = Net("Vin")
vreg1["GND, IN, OUT"] += gnd, vin, vreg1["HS"]
assert vreg1.is_connected() == True
assert len(gnd) == 1
assert len(vin) == 1
assert len(vreg1["IN"].net) == 1
assert len(vreg1["HS"].net) == 2
assert len(vreg1["OUT"].net) == 2
assert vreg1["OUT"].net.is_attached(vreg1["HS"].net)
|
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
import pytest
from skidl import Net, Part
from .setup_teardown import setup_function, teardown_function
def test_string_indices_1():
vreg1 = Part("xess.lib", "1117", footprint="null")
gnd = Net("GND")
vin = Net("Vin")
vreg1["GND, IN, OUT"] += gnd, vin, vreg1["HS"]
assert vreg1.is_connected() == True
assert len(gnd) == 1
assert len(vin) == 1
assert len(vreg1["IN"].net) == 1
assert len(vreg1["HS"].net) == 2
assert len(vreg1["OUT"].net) == 2
assert vreg1["OUT"].net.is_attached(vreg1["HS"].net)
|
en
| 0.675338
|
# -*- coding: utf-8 -*- # The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
| 2.269826
| 2
|
python/cugraph/structure/renumber.py
|
codereport/cugraph
| 0
|
6625862
|
<reponame>codereport/cugraph<gh_stars>0
# Copyright (c) 2019, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
from collections import OrderedDict
from cugraph.structure import graph_new_wrapper
from cugraph.structure import graph as csg
def renumber(source_col, dest_col):
"""
Take a (potentially sparse) set of source and destination vertex ids and
renumber the vertices to create a dense set of vertex ids using all values
contiguously from 0 to the number of unique vertices - 1.
Input columns can be either int64 or int32. The output will be mapped to
int32, since many of the cugraph functions are limited to int32. If the
number of unique values in source_col and dest_col > 2^31-1 then this
function will return an error.
Return from this call will be three cudf Series - the renumbered
source_col, the renumbered dest_col and a numbering map that maps the new
ids to the original ids.
Parameters
----------
source_col : cudf.Series
This cudf.Series wraps a gdf_column of size E (E: number of edges).
The gdf column contains the source index for each edge.
Source indices must be an integer type.
dest_col : cudf.Series
This cudf.Series wraps a gdf_column of size E (E: number of edges).
The gdf column contains the destination index for each edge.
Destination indices must be an integer type.
numbering_map : cudf.Series
This cudf.Series wraps a gdf column of size V (V: number of vertices).
The gdf column contains a numbering map that maps the new ids to the
original ids.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> sources = cudf.Series(M['0'])
>>> destinations = cudf.Series(M['1'])
>>> source_col, dest_col, numbering_map = cugraph.renumber(sources,
>>> destinations)
>>> G = cugraph.Graph()
>>> G.add_edge_list(source_col, dest_col, None)
"""
csg.null_check(source_col)
csg.null_check(dest_col)
(source_col, dest_col,
numbering_map) = graph_new_wrapper.renumber(source_col, dest_col)
return source_col, dest_col, numbering_map
def renumber_from_cudf(_df, source_cols_names, dest_cols_names):
"""
Take a set, collection (lists) of source and destination columns, and
renumber the vertices to create a dense set of contiguously vertex ids
from 0 to the number of unique vertices - 1.
Input columns can be any data type.
The output will be mapped to int32, since many of the cugraph functions
are limited to int32. If the number of unique values is > 2^31-1 then
this function will return an error.
NOTICE
---------
- The number of source and destination columns must be the same
- The source and destination column names cannot be the same or overlap.
- The data type order needs to be the same between source and destination
columns. This is due to the two sets being merged to create a single
list of all possible values
Input Parameters
----------
df : cudf.DataFrame
The dataframe containing the source and destination columans
source_cols_names : List
This is a list of source column names
dest_cols_names : List
This is a list of destination column names
Returns
---------
src_ids : cudf.Series
The new source vertex IDs
dst_ids : cudf.Series
The new destination vertex IDs
numbering_df : cudf.DataFrame
a dataframe that maps a vertex ID to the unique values
Examples
--------
>>> gdf = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> source_col, dest_col, numbering_map =
>>> cugraph.renumber_from_cudf(gdf, ["0"], ["1"])
>>>
>>> G = cugraph.Graph()
>>> G.add_edge_list(source_col, dest_col, None)
"""
if len(source_cols_names) == 0:
raise ValueError('Source column list is empty')
if len(dest_cols_names) == 0:
raise ValueError('Destination column list is empty')
if len(source_cols_names) != len(dest_cols_names):
raise ValueError(
'Source and Destination column lists are not the same size')
# ---------------------------------------------------
# get the source column names and map to indexes
_src_map = OrderedDict()
for i in range(len(source_cols_names)):
_src_map.update({source_cols_names[i]: str(i)})
_tmp_df_src = _df[source_cols_names].rename(_src_map).reset_index()
# --------------------------------------------------------
# get the destination column names and map to indexes
_dst_map = OrderedDict()
for i in range(len(dest_cols_names)):
_dst_map.update({dest_cols_names[i]: str(i)})
_tmp_df_dst = _df[dest_cols_names].rename(_dst_map).reset_index()
_vals = list(_src_map.values())
# ------------------------------------
_s = _tmp_df_src.drop('index').drop_duplicates()
_d = _tmp_df_dst.drop('index').drop_duplicates()
_tmp_df = cudf.concat([_s, _d])
_tmp_df = _tmp_df.drop_duplicates().reset_index().drop('index')
if len(_tmp_df) > np.iinfo(np.int32).max:
raise ValueError('dataset is larger than int32')
_tmp_df['id'] = _tmp_df.index.astype(np.int32)
del _s
del _d
_src_ids = _tmp_df_src.merge(
_tmp_df, on=_vals, how='left').drop(_vals).sort_values(by='index')
_dst_ids = _tmp_df_dst.merge(
_tmp_df, on=_vals, how='left').drop(_vals).sort_values(by='index')
_s_id = cudf.Series(_src_ids['id']).reset_index(drop=True)
_d_id = cudf.Series(_dst_ids['id']).reset_index(drop=True)
del _src_ids
del _dst_ids
return _s_id, _d_id, _tmp_df
|
# Copyright (c) 2019, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
from collections import OrderedDict
from cugraph.structure import graph_new_wrapper
from cugraph.structure import graph as csg
def renumber(source_col, dest_col):
"""
Take a (potentially sparse) set of source and destination vertex ids and
renumber the vertices to create a dense set of vertex ids using all values
contiguously from 0 to the number of unique vertices - 1.
Input columns can be either int64 or int32. The output will be mapped to
int32, since many of the cugraph functions are limited to int32. If the
number of unique values in source_col and dest_col > 2^31-1 then this
function will return an error.
Return from this call will be three cudf Series - the renumbered
source_col, the renumbered dest_col and a numbering map that maps the new
ids to the original ids.
Parameters
----------
source_col : cudf.Series
This cudf.Series wraps a gdf_column of size E (E: number of edges).
The gdf column contains the source index for each edge.
Source indices must be an integer type.
dest_col : cudf.Series
This cudf.Series wraps a gdf_column of size E (E: number of edges).
The gdf column contains the destination index for each edge.
Destination indices must be an integer type.
numbering_map : cudf.Series
This cudf.Series wraps a gdf column of size V (V: number of vertices).
The gdf column contains a numbering map that maps the new ids to the
original ids.
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> sources = cudf.Series(M['0'])
>>> destinations = cudf.Series(M['1'])
>>> source_col, dest_col, numbering_map = cugraph.renumber(sources,
>>> destinations)
>>> G = cugraph.Graph()
>>> G.add_edge_list(source_col, dest_col, None)
"""
csg.null_check(source_col)
csg.null_check(dest_col)
(source_col, dest_col,
numbering_map) = graph_new_wrapper.renumber(source_col, dest_col)
return source_col, dest_col, numbering_map
def renumber_from_cudf(_df, source_cols_names, dest_cols_names):
    """Renumber multi-column vertex identifiers to dense int32 ids.

    Takes a dataframe plus matching lists of source and destination column
    names and renumbers the vertices to the dense, contiguous id range
    ``0 .. num_unique_vertices - 1``.  Input columns may be of any dtype;
    the output ids are int32 (many cugraph functions are limited to int32),
    so more than 2^31 - 1 unique vertices raises an error.

    Notes
    -----
    - The source and destination lists must be the same length.
    - Source and destination column names cannot be the same or overlap.
    - Column dtypes must appear in the same order in both lists, because the
      two column sets are concatenated to build the list of all values.

    Parameters
    ----------
    _df : cudf.DataFrame
        The dataframe containing the source and destination columns.
    source_cols_names : list
        Names of the source columns.
    dest_cols_names : list
        Names of the destination columns.

    Returns
    -------
    src_ids : cudf.Series
        The new source vertex IDs.
    dst_ids : cudf.Series
        The new destination vertex IDs.
    numbering_df : cudf.DataFrame
        A dataframe that maps each new vertex ID to the original values.

    Examples
    --------
    >>> gdf = cudf.read_csv('datasets/karate.csv', delimiter=' ',
    >>>                     dtype=['int32', 'int32', 'float32'], header=None)
    >>> source_col, dest_col, numbering_map =
    >>>     cugraph.renumber_from_cudf(gdf, ["0"], ["1"])
    >>> G = cugraph.Graph()
    >>> G.add_edge_list(source_col, dest_col, None)
    """
    if len(source_cols_names) == 0:
        raise ValueError('Source column list is empty')
    if len(dest_cols_names) == 0:
        raise ValueError('Destination column list is empty')
    if len(source_cols_names) != len(dest_cols_names):
        raise ValueError(
            'Source and Destination column lists are not the same size')

    # Rename source and destination columns to their positional index so the
    # two frames share column names and can be concatenated and merged.
    _src_map = OrderedDict(
        (name, str(i)) for i, name in enumerate(source_cols_names))
    _tmp_df_src = _df[source_cols_names].rename(_src_map).reset_index()

    _dst_map = OrderedDict(
        (name, str(i)) for i, name in enumerate(dest_cols_names))
    _tmp_df_dst = _df[dest_cols_names].rename(_dst_map).reset_index()

    _vals = list(_src_map.values())

    # Build the deduplicated union of all endpoint values and assign each
    # unique row a dense int32 id (its row index in the combined frame).
    _s = _tmp_df_src.drop('index').drop_duplicates()
    _d = _tmp_df_dst.drop('index').drop_duplicates()
    _tmp_df = cudf.concat([_s, _d])
    _tmp_df = _tmp_df.drop_duplicates().reset_index().drop('index')
    if len(_tmp_df) > np.iinfo(np.int32).max:
        raise ValueError('dataset is larger than int32')
    _tmp_df['id'] = _tmp_df.index.astype(np.int32)
    del _s
    del _d

    # Left-merge each endpoint frame against the numbering table to translate
    # every value into its new id, restoring the original row order via the
    # saved 'index' column.
    _src_ids = _tmp_df_src.merge(
        _tmp_df, on=_vals, how='left').drop(_vals).sort_values(by='index')
    _dst_ids = _tmp_df_dst.merge(
        _tmp_df, on=_vals, how='left').drop(_vals).sort_values(by='index')
    _s_id = cudf.Series(_src_ids['id']).reset_index(drop=True)
    _d_id = cudf.Series(_dst_ids['id']).reset_index(drop=True)
    del _src_ids
    del _dst_ids
    return _s_id, _d_id, _tmp_df
|
en
| 0.639223
|
# Copyright (c) 2019, <NAME>. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Take a (potentially sparse) set of source and destination vertex ids and renumber the vertices to create a dense set of vertex ids using all values contiguously from 0 to the number of unique vertices - 1. Input columns can be either int64 or int32. The output will be mapped to int32, since many of the cugraph functions are limited to int32. If the number of unique values in source_col and dest_col > 2^31-1 then this function will return an error. Return from this call will be three cudf Series - the renumbered source_col, the renumbered dest_col and a numbering map that maps the new ids to the original ids. Parameters ---------- source_col : cudf.Series This cudf.Series wraps a gdf_column of size E (E: number of edges). The gdf column contains the source index for each edge. Source indices must be an integer type. dest_col : cudf.Series This cudf.Series wraps a gdf_column of size E (E: number of edges). The gdf column contains the destination index for each edge. Destination indices must be an integer type. numbering_map : cudf.Series This cudf.Series wraps a gdf column of size V (V: number of vertices). The gdf column contains a numbering map that maps the new ids to the original ids. 
Examples -------- >>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ', >>> dtype=['int32', 'int32', 'float32'], header=None) >>> sources = cudf.Series(M['0']) >>> destinations = cudf.Series(M['1']) >>> source_col, dest_col, numbering_map = cugraph.renumber(sources, >>> destinations) >>> G = cugraph.Graph() >>> G.add_edge_list(source_col, dest_col, None) Take a set, collection (lists) of source and destination columns, and renumber the vertices to create a dense set of contiguously vertex ids from 0 to the number of unique vertices - 1. Input columns can be any data type. The output will be mapped to int32, since many of the cugraph functions are limited to int32. If the number of unique values is > 2^31-1 then this function will return an error. NOTICE --------- - The number of source and destination columns must be the same - The source and destination column names cannot be the same or overlap. - The data type order needs to be the same between source and destination columns. This is due to the two sets being merged to create a single list of all possible values Input Parameters ---------- df : cudf.DataFrame The dataframe containing the source and destination columans source_cols_names : List This is a list of source column names dest_cols_names : List This is a list of destination column names Returns --------- src_ids : cudf.Series The new source vertex IDs dst_ids : cudf.Series The new destination vertex IDs numbering_df : cudf.DataFrame a dataframe that maps a vertex ID to the unique values Examples -------- >>> gdf = cudf.read_csv('datasets/karate.csv', delimiter=' ', >>> dtype=['int32', 'int32', 'float32'], header=None) >>> source_col, dest_col, numbering_map = >>> cugraph.renumber_from_cudf(gdf, ["0"], ["1"]) >>> >>> G = cugraph.Graph() >>> G.add_edge_list(source_col, dest_col, None) # --------------------------------------------------- # get the source column names and map to indexes # -------------------------------------------------------- # get 
the destination column names and map to indexes # ------------------------------------
| 2.576602
| 3
|
tests/use_cases/test_get_location.py
|
trewjames/tdd-chess
| 0
|
6625863
|
<filename>tests/use_cases/test_get_location.py<gh_stars>0
from chess.engine import Move, get_location
from chess.board import Board
from chess.pieces import *
import pytest
def test_start_config_white(start_board):
    # The default (white) starting position occupies all of rows 6 and 7.
    expected = {(row, col) for row in (6, 7) for col in range(8)}
    assert get_location(start_board) == expected
def test_start_config_black():
    # With the board oriented for black, the occupied squares are the same
    # coordinates as the white orientation.
    game = Board(player_white=False)
    expected = {(row, col) for row in (6, 7) for col in range(8)}
    assert get_location(game) == expected
def test_start_midgame_config():
    # Opening position except the white pawn from (6, 3) is gone and a white
    # pawn stands on (3, 4).
    board = [["br", "bn", "bb", "bq", "bk", "bb", "bn", "br"],
             ["bp", "bp", "bp", "bp", "--", "bp", "bp", "bp"],
             ["--", "--", "--", "--", "--", "--", "--", "--"],
             ["--", "--", "--", "--", "wp", "--", "--", "--"],
             ["--", "--", "--", "--", "--", "--", "--", "--"],
             ["--", "--", "--", "--", "--", "--", "--", "--"],
             ["wp", "wp", "wp", "--", "wp", "wp", "wp", "wp"],
             ["wr", "wn", "wb", "wq", "wk", "wb", "wn", "wr"]]
    game = Board(array=board)
    start = {(row, col) for row in (6, 7) for col in range(8)}
    expected = (start - {(6, 3)}) | {(3, 4)}
    assert get_location(game) == expected
def test_after_move():
    # Play one white pawn move and one black pawn move, then check the white
    # occupancy set reflects the pawn that moved from (6, 3) to (4, 3).
    game = Board()
    Move((6, 3), (4, 3), game).execute()
    Move((1, 4), (3, 4), game).execute()
    locations = get_location(game)
    assert game[(4, 3)].name == 'wp'
    start = {(row, col) for row in (6, 7) for col in range(8)}
    assert locations == (start - {(6, 3)}) | {(4, 3)}
# --- Find Pieces --- # noqa
@pytest.mark.parametrize(
    "white, coord", [
        (True, (7, 4)),
        (False, (0, 4)),
    ]
)
def test_find_king(start_board, white, coord):
    # The king starts on the e-file: row 7 for white, row 0 for black.
    assert get_location(start_board, turn_white=white, find_piece=King) == coord
@pytest.mark.parametrize(
    "white, piece, coord", [
        (True, Rook, {(7, 0), (7, 7)}),  # white rooks, both alive
        (False, Night, (0, 6)),          # black night, one alive
        (True, King, (7, 4)),            # white king
        (False, King, (0, 4)),           # black king
        (False, Bishop, []),             # black bishop, none alive
    ]
)
def test_find_others(game, white, piece, coord):
    found = get_location(game, turn_white=white, find_piece=piece)
    assert found == coord
@pytest.fixture
def game():
    # Mid-game position: black's bishops and one knight are captured, and a
    # white pawn stands on (3, 4).
    layout = [["br", "--", "--", "bq", "bk", "--", "bn", "br"],
              ["bp", "bp", "bp", "bp", "--", "bp", "bp", "bp"],
              ["--", "--", "--", "--", "--", "--", "--", "--"],
              ["--", "--", "--", "--", "wp", "--", "--", "--"],
              ["--", "--", "--", "--", "--", "--", "--", "--"],
              ["--", "--", "--", "--", "--", "--", "--", "--"],
              ["wp", "wp", "wp", "--", "wp", "wp", "wp", "wp"],
              ["wr", "wn", "wb", "wq", "wk", "wb", "wn", "wr"]]
    return Board(array=layout)
|
<filename>tests/use_cases/test_get_location.py<gh_stars>0
from chess.engine import Move, get_location
from chess.board import Board
from chess.pieces import *
import pytest
def test_start_config_white(start_board):
    # The default (white) starting position occupies all of rows 6 and 7.
    expected = {(row, col) for row in (6, 7) for col in range(8)}
    assert get_location(start_board) == expected


def test_start_config_black():
    # With the board oriented for black, the occupied squares are the same
    # coordinates as the white orientation.
    game = Board(player_white=False)
    expected = {(row, col) for row in (6, 7) for col in range(8)}
    assert get_location(game) == expected


def test_start_midgame_config():
    # Opening position except the white pawn from (6, 3) is gone and a white
    # pawn stands on (3, 4).
    board = [["br", "bn", "bb", "bq", "bk", "bb", "bn", "br"],
             ["bp", "bp", "bp", "bp", "--", "bp", "bp", "bp"],
             ["--", "--", "--", "--", "--", "--", "--", "--"],
             ["--", "--", "--", "--", "wp", "--", "--", "--"],
             ["--", "--", "--", "--", "--", "--", "--", "--"],
             ["--", "--", "--", "--", "--", "--", "--", "--"],
             ["wp", "wp", "wp", "--", "wp", "wp", "wp", "wp"],
             ["wr", "wn", "wb", "wq", "wk", "wb", "wn", "wr"]]
    game = Board(array=board)
    start = {(row, col) for row in (6, 7) for col in range(8)}
    expected = (start - {(6, 3)}) | {(3, 4)}
    assert get_location(game) == expected


def test_after_move():
    # Play one white pawn move and one black pawn move, then check the white
    # occupancy set reflects the pawn that moved from (6, 3) to (4, 3).
    game = Board()
    Move((6, 3), (4, 3), game).execute()
    Move((1, 4), (3, 4), game).execute()
    locations = get_location(game)
    assert game[(4, 3)].name == 'wp'
    start = {(row, col) for row in (6, 7) for col in range(8)}
    assert locations == (start - {(6, 3)}) | {(4, 3)}


# --- Find Pieces --- # noqa
@pytest.mark.parametrize(
    "white, coord", [
        (True, (7, 4)),
        (False, (0, 4)),
    ]
)
def test_find_king(start_board, white, coord):
    # The king starts on the e-file: row 7 for white, row 0 for black.
    assert get_location(start_board, turn_white=white, find_piece=King) == coord


@pytest.mark.parametrize(
    "white, piece, coord", [
        (True, Rook, {(7, 0), (7, 7)}),  # white rooks, both alive
        (False, Night, (0, 6)),          # black night, one alive
        (True, King, (7, 4)),            # white king
        (False, King, (0, 4)),           # black king
        (False, Bishop, []),             # black bishop, none alive
    ]
)
def test_find_others(game, white, piece, coord):
    found = get_location(game, turn_white=white, find_piece=piece)
    assert found == coord


@pytest.fixture
def game():
    # Mid-game position: black's bishops and one knight are captured, and a
    # white pawn stands on (3, 4).
    layout = [["br", "--", "--", "bq", "bk", "--", "bn", "br"],
              ["bp", "bp", "bp", "bp", "--", "bp", "bp", "bp"],
              ["--", "--", "--", "--", "--", "--", "--", "--"],
              ["--", "--", "--", "--", "wp", "--", "--", "--"],
              ["--", "--", "--", "--", "--", "--", "--", "--"],
              ["--", "--", "--", "--", "--", "--", "--", "--"],
              ["wp", "wp", "wp", "--", "wp", "wp", "wp", "wp"],
              ["wr", "wn", "wb", "wq", "wk", "wb", "wn", "wr"]]
    return Board(array=layout)
|
en
| 0.7061
|
# --- Find Pieces --- # noqa # white rooks, both alive # black night, one alive # white king # black king # black bishop, none alive
| 2.273567
| 2
|
corehq/apps/users/urls.py
|
SEL-Columbia/commcare-hq
| 1
|
6625864
|
<filename>corehq/apps/users/urls.py
#from django.conf.urls.defaults import patterns, url
from corehq.apps.users.views import DefaultProjectUserSettingsView, EditWebUserView, EditMyAccountDomainView, ListWebUsersView, InviteWebUserView
from corehq.apps.users.views.mobile.groups import EditGroupsView, EditGroupMembersView
from corehq.apps.users.views.mobile.users import UploadCommCareUsers, EditCommCareUserView, ListCommCareUsersView, AsyncListCommCareUsersView, CreateCommCareUserView, ConfirmBillingAccountForExtraUsersView, \
UserUploadStatusView
from django.conf.urls.defaults import *
from corehq.apps.domain.utils import grandfathered_domain_re
# URL configuration for the users app.  NOTE: Django resolves the first
# matching pattern, so the relative order of these entries is significant.
urlpatterns = patterns('corehq.apps.users.views',
    # Landing page and the current user's own account settings.
    url(r'^$', DefaultProjectUserSettingsView.as_view(), name=DefaultProjectUserSettingsView.urlname),
    url(r'^my_account/$', EditMyAccountDomainView.as_view(), name=EditMyAccountDomainView.urlname),
    url(r'^change_password/(?P<login_id>[\w-]+)/$', 'change_password', name="change_password"),
    url(r'^domain_accounts/(?P<couch_user_id>[\w-]+)/$', 'domain_accounts', name='domain_accounts'),
    # Phone-number management for a given couch user.
    url(r'^delete_phone_number/(?P<couch_user_id>[\w-]+)/$',
        'delete_phone_number',
        name='delete_phone_number'),
    url(r'^make_phone_number_default/(?P<couch_user_id>[\w-]+)/$',
        'make_phone_number_default',
        name='make_phone_number_default'),
    url(r'^verify_phone_number/(?P<couch_user_id>[\w-]+)/$',
        'verify_phone_number',
        name='verify_phone_number'),
    # Domain membership; the domain segment is validated by the shared
    # grandfathered_domain_re pattern.
    url(r'^add_domain_membership/(?P<couch_user_id>[\w-]+)/(?P<domain_name>%s)/$' % grandfathered_domain_re,
        'add_domain_membership',
        name='add_domain_membership'),
    # Web-user management: edit/remove/invite, plus role CRUD.
    url(r'^web/account/(?P<couch_user_id>[\w-]+)/$', EditWebUserView.as_view(), name=EditWebUserView.urlname),
    url(r'^web/remove/(?P<couch_user_id>[\w-]+)/$', 'remove_web_user', name='remove_web_user'),
    url(r'^web/undo_remove/(?P<record_id>[\w-]+)/$', 'undo_remove_web_user', name='undo_remove_web_user'),
    url(r'^web/invite/$', InviteWebUserView.as_view(), name=InviteWebUserView.urlname),
    url(r'^web/reinvite/$', 'reinvite_web_user', name='reinvite_web_user'),
    url(r'^web/location_restriction_for_users/$', 'location_restriction_for_users', name='location_restriction_for_users'),
    url(r'^web/$', ListWebUsersView.as_view(), name=ListWebUsersView.urlname),
    url(r'^join/(?P<invitation_id>[\w-]+)/$', 'accept_invitation', name='domain_accept_invitation'),
    url(r'^web/role/save/$', 'post_user_role', name='post_user_role'),
    url(r'^web/role/delete/$', 'delete_user_role', name='delete_user_role'),
    url(r'^httpdigest/?$', 'test_httpdigest'),
    url(r'^audit_logs/$', 'audit_logs', name='user_audit_logs')
) + \
patterns("corehq.apps.users.views.mobile.users",
    # Mobile (CommCare) user management: listing, per-account edits,
    # archive/delete lifecycle, and bulk upload/download.
    url(r'^commcare/$', ListCommCareUsersView.as_view(), name=ListCommCareUsersView.urlname),
    url(r'^commcare/account/(?P<couch_user_id>[\w-]+)/$', EditCommCareUserView.as_view(), name=EditCommCareUserView.urlname),
    url(r'^commcare/account/(?P<couch_user_id>[\w-]+)/user_data/$', 'update_user_data', name='update_user_data'),
    url(r'^commcare/account/(?P<couch_user_id>[\w-]+)/groups/$', 'update_user_groups', name='update_user_groups'),
    url(r'^commcare/list/$', AsyncListCommCareUsersView.as_view(), name=AsyncListCommCareUsersView.urlname),
    url(r'^commcare/archive/(?P<user_id>[\w-]+)/$', 'archive_commcare_user', name='archive_commcare_user'),
    # Unarchive reuses the archive view with is_active=True.
    url(r'^commcare/unarchive/(?P<user_id>[\w-]+)/$', 'archive_commcare_user', name='unarchive_commcare_user', kwargs={'is_active': True}),
    url(r'^commcare/delete/(?P<user_id>[\w-]+)/$', 'delete_commcare_user', name='delete_commcare_user'),
    url(r'^commcare/restore/(?P<user_id>[\w-]+)/$', 'restore_commcare_user', name='restore_commcare_user'),
    url(r'^commcare/upload/$', UploadCommCareUsers.as_view(), name=UploadCommCareUsers.urlname),
    # download_id is an async task identifier (25-32 char token).
    url(r'^commcare/upload/status/(?P<download_id>[0-9a-fA-Z]{25,32})/$', UserUploadStatusView.as_view(),
        name=UserUploadStatusView.urlname),
    url(r'^commcare/upload/poll/(?P<download_id>[0-9a-fA-Z]{25,32})/$',
        'user_upload_job_poll', name='user_upload_job_poll'),
    url(r'^commcare/download/$', 'download_commcare_users', name='download_commcare_users'),
    url(r'^commcare/set_group/$', 'set_commcare_user_group', name='set_commcare_user_group'),
    url(r'^commcare/add_commcare_account/$', CreateCommCareUserView.as_view(), name=CreateCommCareUserView.urlname),
    url(r'^commcare/confirm_charges/$', ConfirmBillingAccountForExtraUsersView.as_view(),
        name=ConfirmBillingAccountForExtraUsersView.urlname),
) +\
patterns("corehq.apps.users.views.mobile.groups",
    # Mobile-worker group management.
    url(r'^groups/$', EditGroupsView.as_view(), name=EditGroupsView.urlname),
    url(r'^groups/(?P<group_id>[ \w-]+)/$', EditGroupMembersView.as_view(), name=EditGroupMembersView.urlname),
)
|
<filename>corehq/apps/users/urls.py
#from django.conf.urls.defaults import patterns, url
from corehq.apps.users.views import DefaultProjectUserSettingsView, EditWebUserView, EditMyAccountDomainView, ListWebUsersView, InviteWebUserView
from corehq.apps.users.views.mobile.groups import EditGroupsView, EditGroupMembersView
from corehq.apps.users.views.mobile.users import UploadCommCareUsers, EditCommCareUserView, ListCommCareUsersView, AsyncListCommCareUsersView, CreateCommCareUserView, ConfirmBillingAccountForExtraUsersView, \
UserUploadStatusView
from django.conf.urls.defaults import *
from corehq.apps.domain.utils import grandfathered_domain_re
# URL configuration for the users app.  NOTE: Django resolves the first
# matching pattern, so the relative order of these entries is significant.
urlpatterns = patterns('corehq.apps.users.views',
    # Landing page and the current user's own account settings.
    url(r'^$', DefaultProjectUserSettingsView.as_view(), name=DefaultProjectUserSettingsView.urlname),
    url(r'^my_account/$', EditMyAccountDomainView.as_view(), name=EditMyAccountDomainView.urlname),
    url(r'^change_password/(?P<login_id>[\w-]+)/$', 'change_password', name="change_password"),
    url(r'^domain_accounts/(?P<couch_user_id>[\w-]+)/$', 'domain_accounts', name='domain_accounts'),
    # Phone-number management for a given couch user.
    url(r'^delete_phone_number/(?P<couch_user_id>[\w-]+)/$',
        'delete_phone_number',
        name='delete_phone_number'),
    url(r'^make_phone_number_default/(?P<couch_user_id>[\w-]+)/$',
        'make_phone_number_default',
        name='make_phone_number_default'),
    url(r'^verify_phone_number/(?P<couch_user_id>[\w-]+)/$',
        'verify_phone_number',
        name='verify_phone_number'),
    # Domain membership; the domain segment is validated by the shared
    # grandfathered_domain_re pattern.
    url(r'^add_domain_membership/(?P<couch_user_id>[\w-]+)/(?P<domain_name>%s)/$' % grandfathered_domain_re,
        'add_domain_membership',
        name='add_domain_membership'),
    # Web-user management: edit/remove/invite, plus role CRUD.
    url(r'^web/account/(?P<couch_user_id>[\w-]+)/$', EditWebUserView.as_view(), name=EditWebUserView.urlname),
    url(r'^web/remove/(?P<couch_user_id>[\w-]+)/$', 'remove_web_user', name='remove_web_user'),
    url(r'^web/undo_remove/(?P<record_id>[\w-]+)/$', 'undo_remove_web_user', name='undo_remove_web_user'),
    url(r'^web/invite/$', InviteWebUserView.as_view(), name=InviteWebUserView.urlname),
    url(r'^web/reinvite/$', 'reinvite_web_user', name='reinvite_web_user'),
    url(r'^web/location_restriction_for_users/$', 'location_restriction_for_users', name='location_restriction_for_users'),
    url(r'^web/$', ListWebUsersView.as_view(), name=ListWebUsersView.urlname),
    url(r'^join/(?P<invitation_id>[\w-]+)/$', 'accept_invitation', name='domain_accept_invitation'),
    url(r'^web/role/save/$', 'post_user_role', name='post_user_role'),
    url(r'^web/role/delete/$', 'delete_user_role', name='delete_user_role'),
    url(r'^httpdigest/?$', 'test_httpdigest'),
    url(r'^audit_logs/$', 'audit_logs', name='user_audit_logs')
) + \
patterns("corehq.apps.users.views.mobile.users",
    # Mobile (CommCare) user management: listing, per-account edits,
    # archive/delete lifecycle, and bulk upload/download.
    url(r'^commcare/$', ListCommCareUsersView.as_view(), name=ListCommCareUsersView.urlname),
    url(r'^commcare/account/(?P<couch_user_id>[\w-]+)/$', EditCommCareUserView.as_view(), name=EditCommCareUserView.urlname),
    url(r'^commcare/account/(?P<couch_user_id>[\w-]+)/user_data/$', 'update_user_data', name='update_user_data'),
    url(r'^commcare/account/(?P<couch_user_id>[\w-]+)/groups/$', 'update_user_groups', name='update_user_groups'),
    url(r'^commcare/list/$', AsyncListCommCareUsersView.as_view(), name=AsyncListCommCareUsersView.urlname),
    url(r'^commcare/archive/(?P<user_id>[\w-]+)/$', 'archive_commcare_user', name='archive_commcare_user'),
    # Unarchive reuses the archive view with is_active=True.
    url(r'^commcare/unarchive/(?P<user_id>[\w-]+)/$', 'archive_commcare_user', name='unarchive_commcare_user', kwargs={'is_active': True}),
    url(r'^commcare/delete/(?P<user_id>[\w-]+)/$', 'delete_commcare_user', name='delete_commcare_user'),
    url(r'^commcare/restore/(?P<user_id>[\w-]+)/$', 'restore_commcare_user', name='restore_commcare_user'),
    url(r'^commcare/upload/$', UploadCommCareUsers.as_view(), name=UploadCommCareUsers.urlname),
    # download_id is an async task identifier (25-32 char token).
    url(r'^commcare/upload/status/(?P<download_id>[0-9a-fA-Z]{25,32})/$', UserUploadStatusView.as_view(),
        name=UserUploadStatusView.urlname),
    url(r'^commcare/upload/poll/(?P<download_id>[0-9a-fA-Z]{25,32})/$',
        'user_upload_job_poll', name='user_upload_job_poll'),
    url(r'^commcare/download/$', 'download_commcare_users', name='download_commcare_users'),
    url(r'^commcare/set_group/$', 'set_commcare_user_group', name='set_commcare_user_group'),
    url(r'^commcare/add_commcare_account/$', CreateCommCareUserView.as_view(), name=CreateCommCareUserView.urlname),
    url(r'^commcare/confirm_charges/$', ConfirmBillingAccountForExtraUsersView.as_view(),
        name=ConfirmBillingAccountForExtraUsersView.urlname),
) +\
patterns("corehq.apps.users.views.mobile.groups",
    # Mobile-worker group management.
    url(r'^groups/$', EditGroupsView.as_view(), name=EditGroupsView.urlname),
    url(r'^groups/(?P<group_id>[ \w-]+)/$', EditGroupMembersView.as_view(), name=EditGroupMembersView.urlname),
)
|
en
| 0.162699
|
#from django.conf.urls.defaults import patterns, url
| 1.686858
| 2
|
accounts/views.py
|
mowhammadrezaa/ecommerce
| 0
|
6625865
|
from django.contrib.auth.views import LoginView
from django.shortcuts import render
from accounts.forms import LoginForm, RegisterForm
from django.contrib.auth.models import User
class LoginViewCustom(LoginView):
    # Login page built on Django's LoginView: custom template, custom auth
    # form, and extra template context (title plus a Persian welcome message).
    template_name = 'accounts/login_page.html'
    authentication_form = LoginForm
    extra_context = {
        "title": "Login",
        "content": "به صفحه ورود خوش آمدید!",
    }
def register_page(request):
    """Render the registration page and create a user on valid POST.

    NOTE(review): the original source was corrupted by credential-redaction
    placeholders (``<PASSWORD>``); the password handling below is
    reconstructed from the form field names and should be confirmed against
    ``RegisterForm``'s actual field names.
    """
    register_form = RegisterForm(request.POST or None)
    context = {
        "title": "Register",
        "content": "به صفحه ثبت نام خوش آمدید!",
        "register_form": register_form,
    }
    if register_form.is_valid():
        username = register_form.cleaned_data.get("username")
        password = register_form.cleaned_data.get("password")
        confirm_password = register_form.cleaned_data.get("confirm_password")
        email = register_form.cleaned_data.get("email")
        # Only create the account when both password entries agree; otherwise
        # fall through and re-render the page with the bound form.
        if password == confirm_password:
            User.objects.create_user(
                username=username, password=password, email=email)
    return render(request, "accounts/register_page.html", context)
|
from django.contrib.auth.views import LoginView
from django.shortcuts import render
from accounts.forms import LoginForm, RegisterForm
from django.contrib.auth.models import User
class LoginViewCustom(LoginView):
    # Login page built on Django's LoginView: custom template, custom auth
    # form, and extra template context (title plus a Persian welcome message).
    template_name = 'accounts/login_page.html'
    authentication_form = LoginForm
    extra_context = {
        "title": "Login",
        "content": "به صفحه ورود خوش آمدید!",
    }


def register_page(request):
    """Render the registration page and create a user on valid POST.

    NOTE(review): the original source was corrupted by credential-redaction
    placeholders (``<PASSWORD>``); the password handling below is
    reconstructed from the form field names and should be confirmed against
    ``RegisterForm``'s actual field names.
    """
    register_form = RegisterForm(request.POST or None)
    context = {
        "title": "Register",
        "content": "به صفحه ثبت نام خوش آمدید!",
        "register_form": register_form,
    }
    if register_form.is_valid():
        username = register_form.cleaned_data.get("username")
        password = register_form.cleaned_data.get("password")
        confirm_password = register_form.cleaned_data.get("confirm_password")
        email = register_form.cleaned_data.get("email")
        # Only create the account when both password entries agree; otherwise
        # fall through and re-render the page with the bound form.
        if password == confirm_password:
            User.objects.create_user(
                username=username, password=password, email=email)
    return render(request, "accounts/register_page.html", context)
|
none
| 1
| 2.146585
| 2
|
|
beta/nncf/tensorflow/pruning/callbacks.py
|
xiao1228/nncf
| 0
|
6625866
|
<reponame>xiao1228/nncf<gh_stars>0
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.statistics import NNCFStatistics
from beta.nncf.tensorflow.callbacks.statistics_callback import StatisticsCallback
class PruningStatisticsCallback(StatisticsCallback):
    """
    Callback for logging pruning compression statistics to tensorboard and stdout.
    """

    def _prepare_for_tensorboard(self, nncf_stats: NNCFStatistics):
        # Flatten the filter-pruning statistics into scalar tensorboard tags:
        # one model-wide pruning level plus a per-layer detailed breakdown.
        model_stats = nncf_stats.filter_pruning.model_statistics
        per_layer = {
            f'3.compression_details/statistics/{summary.name}/pruning_level':
                summary.filter_pruning_level
            for summary in model_stats.pruned_layers_summary
        }
        return {
            '2.compression/statistics/pruning_level_for_model':
                model_stats.pruning_level,
            **per_layer,
        }
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.statistics import NNCFStatistics
from beta.nncf.tensorflow.callbacks.statistics_callback import StatisticsCallback
class PruningStatisticsCallback(StatisticsCallback):
    """
    Callback for logging pruning compression statistics to tensorboard and stdout.
    """

    def _prepare_for_tensorboard(self, nncf_stats: NNCFStatistics):
        # Flatten the filter-pruning statistics into scalar tensorboard tags:
        # one model-wide pruning level plus a per-layer detailed breakdown.
        model_stats = nncf_stats.filter_pruning.model_statistics
        per_layer = {
            f'3.compression_details/statistics/{summary.name}/pruning_level':
                summary.filter_pruning_level
            for summary in model_stats.pruned_layers_summary
        }
        return {
            '2.compression/statistics/pruning_level_for_model':
                model_stats.pruning_level,
            **per_layer,
        }
|
en
| 0.852653
|
Copyright (c) 2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Callback for logging pruning compression statistics to tensorboard and stdout.
| 1.915671
| 2
|
services/tests/test_indexing.py
|
hanaahajj/Serviceinfo_hanaa
| 0
|
6625867
|
from django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
    """Exercise ServiceIndex queryset filtering and search-data extraction."""

    @staticmethod
    def _make_service(**kwargs):
        # Build and persist a service fixture in one step.
        service = ServiceFactory(**kwargs)
        service.save()
        return service

    def setUp(self):
        self.service_ar = self._make_service(
            name_ar='Arabic', name_en='', name_fr='',
            description_ar='language-of-Egypt',
            status=Service.STATUS_CURRENT,
        )
        self.service_en = self._make_service(
            name_en='English', name_ar='', name_fr='',
            description_en='language-of-Australia',
            status=Service.STATUS_CURRENT,
        )
        self.service_fr = self._make_service(
            name_fr='French', name_ar='', name_en='',
            description_fr='language-of-France',
            status=Service.STATUS_CURRENT,
        )
        self.rejected_service_fr = self._make_service(
            name_fr='InactiveParis', name_ar='', name_en='',
            status=Service.STATUS_REJECTED,
        )

    def test_querysets(self):
        # Each language index only contains current services named in that
        # language; rejected services are excluded even in their own language.
        index = ServiceIndex()
        self.assertIn(self.service_ar, index.get_index_queryset('ar'))
        self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
        self.assertIn(self.service_fr, index.get_index_queryset('fr'))
        self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
        self.assertNotIn(self.rejected_service_fr,
                         index.get_index_queryset('fr'))

    def test_search_data(self):
        # The language-specific description feeds the search blob.
        index = ServiceIndex()
        self.assertIn('Egypt',
                      index.get_search_data(self.service_ar, 'ar', None))
        self.assertIn('Australia',
                      index.get_search_data(self.service_en, 'en', None))
|
from django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
    """Exercise ServiceIndex queryset filtering and search-data extraction."""

    @staticmethod
    def _make_service(**kwargs):
        # Build and persist a service fixture in one step.
        service = ServiceFactory(**kwargs)
        service.save()
        return service

    def setUp(self):
        self.service_ar = self._make_service(
            name_ar='Arabic', name_en='', name_fr='',
            description_ar='language-of-Egypt',
            status=Service.STATUS_CURRENT,
        )
        self.service_en = self._make_service(
            name_en='English', name_ar='', name_fr='',
            description_en='language-of-Australia',
            status=Service.STATUS_CURRENT,
        )
        self.service_fr = self._make_service(
            name_fr='French', name_ar='', name_en='',
            description_fr='language-of-France',
            status=Service.STATUS_CURRENT,
        )
        self.rejected_service_fr = self._make_service(
            name_fr='InactiveParis', name_ar='', name_en='',
            status=Service.STATUS_REJECTED,
        )

    def test_querysets(self):
        # Each language index only contains current services named in that
        # language; rejected services are excluded even in their own language.
        index = ServiceIndex()
        self.assertIn(self.service_ar, index.get_index_queryset('ar'))
        self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
        self.assertIn(self.service_fr, index.get_index_queryset('fr'))
        self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
        self.assertNotIn(self.rejected_service_fr,
                         index.get_index_queryset('fr'))

    def test_search_data(self):
        # The language-specific description feeds the search blob.
        index = ServiceIndex()
        self.assertIn('Egypt',
                      index.get_search_data(self.service_ar, 'ar', None))
        self.assertIn('Australia',
                      index.get_search_data(self.service_en, 'en', None))
|
none
| 1
| 2.349317
| 2
|
|
monopyly/cards/__init__.py
|
YSabarad/monopyly
| 4
|
6625868
|
<filename>monopyly/cards/__init__.py
from .deck import Deck
from .chance_deck import ChanceDeck
from .community_chest_deck import CommunityChestDeck
from .elected_chairman import ElectedChairman
from .reward_card import RewardCard
from .fine_card import FineCard
from .get_out_of_jail_free import GetOutOfJailFree
from .advance_to import AdvanceTo
from .go_back import GoBack
from .it_is_your_birthday import ItIsYourBirthday
from .ten_pound_fine_or_take_a_chance import TenPoundFineOrTakeAChance
from .go_back_three_spaces import GoBackThreeSpaces
from .go_to_jail_card import GoToJailCard
from .repairs import Repairs
|
<filename>monopyly/cards/__init__.py
from .deck import Deck
from .chance_deck import ChanceDeck
from .community_chest_deck import CommunityChestDeck
from .elected_chairman import ElectedChairman
from .reward_card import RewardCard
from .fine_card import FineCard
from .get_out_of_jail_free import GetOutOfJailFree
from .advance_to import AdvanceTo
from .go_back import GoBack
from .it_is_your_birthday import ItIsYourBirthday
from .ten_pound_fine_or_take_a_chance import TenPoundFineOrTakeAChance
from .go_back_three_spaces import GoBackThreeSpaces
from .go_to_jail_card import GoToJailCard
from .repairs import Repairs
|
none
| 1
| 1.190482
| 1
|
|
tensorflow_federated/python/core/utils/differential_privacy_test.py
|
RosieLiu/federated
| 0
|
6625869
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_privacy
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.impl.executors import default_executor
from tensorflow_federated.python.core.impl.types import placement_literals
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.utils import differential_privacy
def wrap_aggregate_fn(dp_aggregate_fn, sample_value):
tff_types = type_conversions.type_from_tensors(sample_value)
@computations.federated_computation
def run_initialize():
return intrinsics.federated_value(dp_aggregate_fn.initialize(),
placement_literals.SERVER)
@computations.federated_computation(run_initialize.type_signature.result,
computation_types.FederatedType(
tff_types,
placement_literals.CLIENTS))
def run_aggregate(global_state, client_values):
return dp_aggregate_fn(global_state, client_values)
return run_initialize, run_aggregate
class BuildDpQueryTest(test.TestCase):
def test_build_dp_query_basic(self):
query = differential_privacy.build_dp_query(1.0, 2.0, 3.0)
self.assertIsInstance(query, tensorflow_privacy.GaussianAverageQuery)
self.assertEqual(query._numerator._l2_norm_clip, 1.0)
self.assertEqual(query._numerator._stddev, 2.0)
self.assertEqual(query._denominator, 3.0)
def test_build_dp_query_adaptive(self):
ccba = 0.1
query = differential_privacy.build_dp_query(
1.0,
2.0,
3.0,
adaptive_clip_learning_rate=0.05,
target_unclipped_quantile=0.5,
clipped_count_budget_allocation=ccba,
expected_num_clients=10)
self.assertIsInstance(query,
tensorflow_privacy.QuantileAdaptiveClipAverageQuery)
self.assertIsInstance(query._numerator,
tensorflow_privacy.QuantileAdaptiveClipSumQuery)
expected_sum_query_noise_multiplier = 2.0 * (1.0 - ccba)**(-0.5)
self.assertAlmostEqual(query._numerator._noise_multiplier,
expected_sum_query_noise_multiplier)
self.assertEqual(query._denominator, 3.0)
def test_build_dp_query_per_vector(self):
class MockTensor():
def __init__(self, shape):
self.shape = shape
mock_shape = collections.namedtuple('MockShape', ['dims'])
mock_dim = collections.namedtuple('MockDim', ['value'])
mock_model = collections.namedtuple('MockModel', ['weights'])
mock_weights = collections.namedtuple('MockWeights', ['trainable'])
def make_mock_tensor(*dims):
return MockTensor(mock_shape([mock_dim(dim) for dim in dims]))
vectors = collections.OrderedDict([('a', make_mock_tensor(2)),
('b', make_mock_tensor(2, 3)),
('c', make_mock_tensor(1, 3, 4))])
model = mock_model(mock_weights(vectors))
query = differential_privacy.build_dp_query(
1.0, 2.0, 3.0, per_vector_clipping=True, model=model)
self.assertIsInstance(query, tensorflow_privacy.NestedQuery)
def check(subquery):
self.assertIsInstance(subquery, tensorflow_privacy.GaussianAverageQuery)
self.assertEqual(subquery._denominator, 3.0)
tf.nest.map_structure(check, query._queries)
noise_multipliers = tf.nest.flatten(
tf.nest.map_structure(
lambda query: query._numerator._stddev / query._numerator.
_l2_norm_clip, query._queries))
effective_noise_multiplier = sum([x**-2.0 for x in noise_multipliers])**-0.5
self.assertAlmostEqual(effective_noise_multiplier, 2.0)
class BuildDpAggregateTest(test.TestCase):
def test_dp_sum(self):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, 0.0)
global_state = initialize()
global_state, result = aggregate(global_state, [1.0, 3.0, 5.0])
self.assertEqual(global_state['l2_norm_clip'], 4.0)
self.assertEqual(global_state['stddev'], 0.0)
self.assertEqual(result, 8.0)
def test_dp_sum_structure_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
def datapoint(a, b):
return collections.OrderedDict([('a', (a,)), ('b', [b])])
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, data[0])
global_state = initialize()
global_state, result = aggregate(global_state, data)
self.assertEqual(global_state['l2_norm_clip'], 5.0)
self.assertEqual(global_state['stddev'], 0.0)
self.assertEqual(result['a'][0], 6.0)
self.assertEqual(result['b'][0], 9.0)
def test_dp_sum_structure_list(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def _value_type_fn(value):
del value
return [
computation_types.TensorType(tf.float32),
computation_types.TensorType(tf.float32),
]
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(
query, value_type_fn=_value_type_fn)
def datapoint(a, b):
return [tf.Variable(a, name='a'), tf.Variable(b, name='b')]
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, data[0])
global_state = initialize()
global_state, result = aggregate(global_state, data)
self.assertEqual(global_state['l2_norm_clip'], 5.0)
self.assertEqual(global_state['stddev'], 0.0)
result = list(result)
self.assertEqual(result[0], 6.0)
self.assertEqual(result[1], 9.0)
def test_dp_stateful_mean(self):
class ShrinkingSumQuery(tensorflow_privacy.GaussianSumQuery):
def get_noised_result(self, sample_state, global_state):
global_state = self._GlobalState(
tf.maximum(global_state.l2_norm_clip - 1, 0.0), global_state.stddev)
return sample_state, global_state
query = ShrinkingSumQuery(4.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, 0.0)
global_state = initialize()
records = [1.0, 3.0, 5.0]
def run_and_check(global_state, expected_l2_norm_clip, expected_result):
global_state, result = aggregate(global_state, records)
self.assertEqual(global_state['l2_norm_clip'], expected_l2_norm_clip)
self.assertEqual(result, expected_result)
return global_state
self.assertEqual(global_state['l2_norm_clip'], 4.0)
global_state = run_and_check(global_state, 3.0, 8.0)
global_state = run_and_check(global_state, 2.0, 7.0)
global_state = run_and_check(global_state, 1.0, 5.0)
global_state = run_and_check(global_state, 0.0, 3.0)
global_state = run_and_check(global_state, 0.0, 0.0)
def test_dp_global_state_type(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
_, dp_global_state_type = differential_privacy.build_dp_aggregate(query)
self.assertEqual(dp_global_state_type.__class__.__name__,
'NamedTupleTypeWithPyContainerType')
def test_default_from_tff_result_fn(self):
def check(elements, expected):
record = anonymous_tuple.AnonymousTuple(elements)
result = differential_privacy._default_from_tff_result_fn(record)
self.assertEqual(result, expected)
check([('a', 1), ('b', 2)], collections.OrderedDict([('a', 1), ('b', 2)]))
check([(None, 1), (None, 2)], [1, 2])
with self.assertRaisesRegex(ValueError, 'partially named fields'):
check([('a', 1), (None, 2)], None)
class BuildDpAggregateProcessTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('float', 0.0), ('list', [0.0, 0.0]),
('odict', collections.OrderedDict([('a', 0.0), ('b', 0.0)])))
def test_process_type_signature(self, value_template):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(value_template)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
server_state_type = computation_types.FederatedType(
computation_types.NamedTupleType([('l2_norm_clip', tf.float32),
('stddev', tf.float32)]),
placements.SERVER)
self.assertEqual(
dp_aggregate_process.initialize.type_signature,
computation_types.FunctionType(
parameter=None, result=server_state_type))
client_value_type = computation_types.FederatedType(value_type,
placements.CLIENTS)
client_value_weight_type = computation_types.FederatedType(
tf.float32, placements.CLIENTS)
server_result_type = computation_types.FederatedType(
value_type, placements.SERVER)
server_metrics_type = computation_types.FederatedType((), placements.SERVER)
self.assertEqual(
dp_aggregate_process.next.type_signature,
computation_types.FunctionType(
parameter=computation_types.NamedTupleType([
(None, server_state_type), (None, client_value_type),
(None, client_value_weight_type)
]),
result=computation_types.NamedTupleType([
('state', server_state_type), ('result', server_result_type),
('measurements', server_metrics_type)
])))
def test_dp_sum(self):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(0.0)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, [1.0, 3.0, 5.0],
[1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 4.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result'], 8.0)
def test_dp_sum_structure_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b):
return collections.OrderedDict([('a', (a,)), ('b', [b])])
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['b'][0], 9.0)
def test_dp_sum_structure_nested_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b, c):
return collections.OrderedDict([('a', (a,)),
('bc',
collections.OrderedDict([('b', [b]),
('c', (c,))]))])
data = [
datapoint(1.0, 2.0, 1.0),
datapoint(2.0, 3.0, 1.0),
datapoint(6.0, 8.0, 0.0), # Clipped to 3.0, 4.0, 0.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['bc']['b'][0], 9.0)
self.assertEqual(output['result']['bc']['c'][0], 2.0)
def test_dp_sum_structure_complex(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b, c):
return collections.OrderedDict([('a', (a,)), ('bc', ([b], (c,)))])
data = [
datapoint(1.0, 2.0, 1.0),
datapoint(2.0, 3.0, 1.0),
datapoint(6.0, 8.0, 0.0), # Clipped to 3.0, 4.0, 0.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['bc'][0][0], 9.0)
self.assertEqual(output['result']['bc'][1][0], 2.0)
def test_dp_sum_structure_list(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b):
return [tf.Variable(a, name='a'), tf.Variable(b, name='b')]
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
result = list(output['result'])
self.assertEqual(result[0], 6.0)
self.assertEqual(result[1], 9.0)
def test_dp_stateful_mean(self):
class ShrinkingSumQuery(tensorflow_privacy.GaussianSumQuery):
def get_noised_result(self, sample_state, global_state):
global_state = self._GlobalState(
tf.maximum(global_state.l2_norm_clip - 1, 0.0), global_state.stddev)
return sample_state, global_state
query = ShrinkingSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(0.0)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
records = [1.0, 3.0, 5.0]
def run_and_check(global_state, expected_l2_norm_clip, expected_result):
output = dp_aggregate_process.next(global_state, records, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], expected_l2_norm_clip)
self.assertEqual(output['result'], expected_result)
return output['state']
self.assertEqual(global_state['l2_norm_clip'], 4.0)
global_state = run_and_check(global_state, 3.0, 8.0)
global_state = run_and_check(global_state, 2.0, 7.0)
global_state = run_and_check(global_state, 1.0, 5.0)
global_state = run_and_check(global_state, 0.0, 3.0)
global_state = run_and_check(global_state, 0.0, 0.0)
if __name__ == '__main__':
default_executor.initialize_default_executor()
test.main()
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_privacy
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.impl.executors import default_executor
from tensorflow_federated.python.core.impl.types import placement_literals
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.utils import differential_privacy
def wrap_aggregate_fn(dp_aggregate_fn, sample_value):
tff_types = type_conversions.type_from_tensors(sample_value)
@computations.federated_computation
def run_initialize():
return intrinsics.federated_value(dp_aggregate_fn.initialize(),
placement_literals.SERVER)
@computations.federated_computation(run_initialize.type_signature.result,
computation_types.FederatedType(
tff_types,
placement_literals.CLIENTS))
def run_aggregate(global_state, client_values):
return dp_aggregate_fn(global_state, client_values)
return run_initialize, run_aggregate
class BuildDpQueryTest(test.TestCase):
def test_build_dp_query_basic(self):
query = differential_privacy.build_dp_query(1.0, 2.0, 3.0)
self.assertIsInstance(query, tensorflow_privacy.GaussianAverageQuery)
self.assertEqual(query._numerator._l2_norm_clip, 1.0)
self.assertEqual(query._numerator._stddev, 2.0)
self.assertEqual(query._denominator, 3.0)
def test_build_dp_query_adaptive(self):
ccba = 0.1
query = differential_privacy.build_dp_query(
1.0,
2.0,
3.0,
adaptive_clip_learning_rate=0.05,
target_unclipped_quantile=0.5,
clipped_count_budget_allocation=ccba,
expected_num_clients=10)
self.assertIsInstance(query,
tensorflow_privacy.QuantileAdaptiveClipAverageQuery)
self.assertIsInstance(query._numerator,
tensorflow_privacy.QuantileAdaptiveClipSumQuery)
expected_sum_query_noise_multiplier = 2.0 * (1.0 - ccba)**(-0.5)
self.assertAlmostEqual(query._numerator._noise_multiplier,
expected_sum_query_noise_multiplier)
self.assertEqual(query._denominator, 3.0)
def test_build_dp_query_per_vector(self):
class MockTensor():
def __init__(self, shape):
self.shape = shape
mock_shape = collections.namedtuple('MockShape', ['dims'])
mock_dim = collections.namedtuple('MockDim', ['value'])
mock_model = collections.namedtuple('MockModel', ['weights'])
mock_weights = collections.namedtuple('MockWeights', ['trainable'])
def make_mock_tensor(*dims):
return MockTensor(mock_shape([mock_dim(dim) for dim in dims]))
vectors = collections.OrderedDict([('a', make_mock_tensor(2)),
('b', make_mock_tensor(2, 3)),
('c', make_mock_tensor(1, 3, 4))])
model = mock_model(mock_weights(vectors))
query = differential_privacy.build_dp_query(
1.0, 2.0, 3.0, per_vector_clipping=True, model=model)
self.assertIsInstance(query, tensorflow_privacy.NestedQuery)
def check(subquery):
self.assertIsInstance(subquery, tensorflow_privacy.GaussianAverageQuery)
self.assertEqual(subquery._denominator, 3.0)
tf.nest.map_structure(check, query._queries)
noise_multipliers = tf.nest.flatten(
tf.nest.map_structure(
lambda query: query._numerator._stddev / query._numerator.
_l2_norm_clip, query._queries))
effective_noise_multiplier = sum([x**-2.0 for x in noise_multipliers])**-0.5
self.assertAlmostEqual(effective_noise_multiplier, 2.0)
class BuildDpAggregateTest(test.TestCase):
def test_dp_sum(self):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, 0.0)
global_state = initialize()
global_state, result = aggregate(global_state, [1.0, 3.0, 5.0])
self.assertEqual(global_state['l2_norm_clip'], 4.0)
self.assertEqual(global_state['stddev'], 0.0)
self.assertEqual(result, 8.0)
def test_dp_sum_structure_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
def datapoint(a, b):
return collections.OrderedDict([('a', (a,)), ('b', [b])])
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, data[0])
global_state = initialize()
global_state, result = aggregate(global_state, data)
self.assertEqual(global_state['l2_norm_clip'], 5.0)
self.assertEqual(global_state['stddev'], 0.0)
self.assertEqual(result['a'][0], 6.0)
self.assertEqual(result['b'][0], 9.0)
def test_dp_sum_structure_list(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def _value_type_fn(value):
del value
return [
computation_types.TensorType(tf.float32),
computation_types.TensorType(tf.float32),
]
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(
query, value_type_fn=_value_type_fn)
def datapoint(a, b):
return [tf.Variable(a, name='a'), tf.Variable(b, name='b')]
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, data[0])
global_state = initialize()
global_state, result = aggregate(global_state, data)
self.assertEqual(global_state['l2_norm_clip'], 5.0)
self.assertEqual(global_state['stddev'], 0.0)
result = list(result)
self.assertEqual(result[0], 6.0)
self.assertEqual(result[1], 9.0)
def test_dp_stateful_mean(self):
class ShrinkingSumQuery(tensorflow_privacy.GaussianSumQuery):
def get_noised_result(self, sample_state, global_state):
global_state = self._GlobalState(
tf.maximum(global_state.l2_norm_clip - 1, 0.0), global_state.stddev)
return sample_state, global_state
query = ShrinkingSumQuery(4.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, 0.0)
global_state = initialize()
records = [1.0, 3.0, 5.0]
def run_and_check(global_state, expected_l2_norm_clip, expected_result):
global_state, result = aggregate(global_state, records)
self.assertEqual(global_state['l2_norm_clip'], expected_l2_norm_clip)
self.assertEqual(result, expected_result)
return global_state
self.assertEqual(global_state['l2_norm_clip'], 4.0)
global_state = run_and_check(global_state, 3.0, 8.0)
global_state = run_and_check(global_state, 2.0, 7.0)
global_state = run_and_check(global_state, 1.0, 5.0)
global_state = run_and_check(global_state, 0.0, 3.0)
global_state = run_and_check(global_state, 0.0, 0.0)
def test_dp_global_state_type(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
_, dp_global_state_type = differential_privacy.build_dp_aggregate(query)
self.assertEqual(dp_global_state_type.__class__.__name__,
'NamedTupleTypeWithPyContainerType')
def test_default_from_tff_result_fn(self):
def check(elements, expected):
record = anonymous_tuple.AnonymousTuple(elements)
result = differential_privacy._default_from_tff_result_fn(record)
self.assertEqual(result, expected)
check([('a', 1), ('b', 2)], collections.OrderedDict([('a', 1), ('b', 2)]))
check([(None, 1), (None, 2)], [1, 2])
with self.assertRaisesRegex(ValueError, 'partially named fields'):
check([('a', 1), (None, 2)], None)
class BuildDpAggregateProcessTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('float', 0.0), ('list', [0.0, 0.0]),
('odict', collections.OrderedDict([('a', 0.0), ('b', 0.0)])))
def test_process_type_signature(self, value_template):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(value_template)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
server_state_type = computation_types.FederatedType(
computation_types.NamedTupleType([('l2_norm_clip', tf.float32),
('stddev', tf.float32)]),
placements.SERVER)
self.assertEqual(
dp_aggregate_process.initialize.type_signature,
computation_types.FunctionType(
parameter=None, result=server_state_type))
client_value_type = computation_types.FederatedType(value_type,
placements.CLIENTS)
client_value_weight_type = computation_types.FederatedType(
tf.float32, placements.CLIENTS)
server_result_type = computation_types.FederatedType(
value_type, placements.SERVER)
server_metrics_type = computation_types.FederatedType((), placements.SERVER)
self.assertEqual(
dp_aggregate_process.next.type_signature,
computation_types.FunctionType(
parameter=computation_types.NamedTupleType([
(None, server_state_type), (None, client_value_type),
(None, client_value_weight_type)
]),
result=computation_types.NamedTupleType([
('state', server_state_type), ('result', server_result_type),
('measurements', server_metrics_type)
])))
def test_dp_sum(self):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(0.0)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, [1.0, 3.0, 5.0],
[1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 4.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result'], 8.0)
def test_dp_sum_structure_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b):
return collections.OrderedDict([('a', (a,)), ('b', [b])])
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['b'][0], 9.0)
def test_dp_sum_structure_nested_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b, c):
return collections.OrderedDict([('a', (a,)),
('bc',
collections.OrderedDict([('b', [b]),
('c', (c,))]))])
data = [
datapoint(1.0, 2.0, 1.0),
datapoint(2.0, 3.0, 1.0),
datapoint(6.0, 8.0, 0.0), # Clipped to 3.0, 4.0, 0.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['bc']['b'][0], 9.0)
self.assertEqual(output['result']['bc']['c'][0], 2.0)
def test_dp_sum_structure_complex(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b, c):
return collections.OrderedDict([('a', (a,)), ('bc', ([b], (c,)))])
data = [
datapoint(1.0, 2.0, 1.0),
datapoint(2.0, 3.0, 1.0),
datapoint(6.0, 8.0, 0.0), # Clipped to 3.0, 4.0, 0.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['bc'][0][0], 9.0)
self.assertEqual(output['result']['bc'][1][0], 2.0)
def test_dp_sum_structure_list(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b):
return [tf.Variable(a, name='a'), tf.Variable(b, name='b')]
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
result = list(output['result'])
self.assertEqual(result[0], 6.0)
self.assertEqual(result[1], 9.0)
def test_dp_stateful_mean(self):
class ShrinkingSumQuery(tensorflow_privacy.GaussianSumQuery):
def get_noised_result(self, sample_state, global_state):
global_state = self._GlobalState(
tf.maximum(global_state.l2_norm_clip - 1, 0.0), global_state.stddev)
return sample_state, global_state
query = ShrinkingSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(0.0)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
records = [1.0, 3.0, 5.0]
def run_and_check(global_state, expected_l2_norm_clip, expected_result):
output = dp_aggregate_process.next(global_state, records, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], expected_l2_norm_clip)
self.assertEqual(output['result'], expected_result)
return output['state']
self.assertEqual(global_state['l2_norm_clip'], 4.0)
global_state = run_and_check(global_state, 3.0, 8.0)
global_state = run_and_check(global_state, 2.0, 7.0)
global_state = run_and_check(global_state, 1.0, 5.0)
global_state = run_and_check(global_state, 0.0, 3.0)
global_state = run_and_check(global_state, 0.0, 0.0)
if __name__ == '__main__':
default_executor.initialize_default_executor()
test.main()
|
en
| 0.849148
|
# Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Clipped to 3.0, 4.0 # Clipped to 3.0, 4.0 # Clipped to 3.0, 4.0 # Clipped to 3.0, 4.0, 0.0 # Clipped to 3.0, 4.0, 0.0 # Clipped to 3.0, 4.0
| 1.818897
| 2
|
Code/set_test.py
|
kmurata798/CS-1.3-Core-Data-Structures
| 0
|
6625870
|
from set import HashSet
import unittest
class SetTest(unittest.TestCase):
def test_init(self):
elements = ['1', '2', '3']
set = HashSet(elements)
assert set.size is 3
def test_size(self):
elements = ['A', 'B', 'C', '1', '2']
set = HashSet(elements)
assert set.size is 5
def test_contains(self):
elements = ['K', 'Q', 'J', 'O']
set = HashSet(elements)
assert set.contains('K') is True
assert set.contains('J') is True
assert set.contains('Q') is True
assert set.contains('C') is False
assert set.contains('W') is False
def test_add(self):
elements = ['R', 'V']
set = HashSet(elements)
set.add('M')
set.add('T')
with self.assertRaises(KeyError):
set.add('M') # Element already exists
with self.assertRaises(KeyError):
set.add('V') # Element already exists
assert set.size is 4
assert set.contains('R') is True
assert set.contains('T') is True
def test_remove(self):
elements = ['Z', 'I', 'Q', '4', 'S']
set = HashSet(elements)
with self.assertRaises(KeyError):
set.remove('A') # Element doesn not exist
with self.assertRaises(KeyError):
set.remove('X') # Element does not exist
set.remove('Z')
set.remove('4')
assert set.contains('Z') is False
assert set.contains('4') is False
with self.assertRaises(KeyError):
set.remove('Z') # Element does not exist anymore
def test_union(self):
elements = ['W', 'X', 'Y', 'Z']
elements2 = ['A', 'B', 'D', 'E', 'G', 'I']
elements3 = ['C', 'V', 'M', 'N']
set1 = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
self.assertCountEqual(set1.union(set2).hash.values(), ['A', 'B', 'D', 'E', 'G', 'I', 'W', 'X', 'Y', 'Z']) # Item order does not matter
self.assertCountEqual(set1.union(set3).hash.values(), ['C', 'M', 'N', 'V', 'W', 'X', 'Y', 'Z']) # Item order does not matter
def test_intersection(self):
elements = ['2', 'A', 'B', 'C']
elements2 = ['0', 'A', 'C', 'E', 'X', '2']
elements3 = ['B', 'J', 'L', 'K', 'C', '2']
set1 = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
self.assertCountEqual(set1.intersection(set2).hash.values(), ['A', 'C', '2']) # Item order does not matter
self.assertCountEqual(set1.intersection(set3).hash.values(), ['B', 'C', '2']) # Item order does not matter
def test_difference(self):
elements = ['5', '6', '8', '10', '1']
elements2 = ['1', '5', '6', '7', '2', '9']
elements3 = ['2', '4', '6', '9', '10']
set1 = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
self.assertCountEqual(set1.difference(set2).hash.values(), ['8', '10']) # Item order does not matter
self.assertCountEqual(set1.difference(set3).hash.values(), ['5', '8', '1']) # Item order does not matter
def test_is_subset(self):
elements = ['A', 'G', 'R']
elements2 = ['G', 'D', 'R', 'P', 'A', 'W']
elements3 = ['I', 'O', 'S', 'K', 'M', 'Z']
set1 = HashSet(elements)
set2 = HashSet(elements2)
set3 = HashSet(elements3)
assert set1.is_subset(set2) is True
assert set1.is_subset(set3) is False
assert set2.is_subset(set3) is False
if __name__ == '__main__':
unittest.main()
|
from set import HashSet
import unittest
class SetTest(unittest.TestCase):
    """Unit tests for the HashSet implementation.

    Fixes over the previous version: integer sizes are compared with ``==``
    instead of ``is`` (identity of small ints is a CPython implementation
    detail and raises SyntaxWarning on Python 3.8+), and the local variable
    ``set`` no longer shadows the builtin.
    """

    def test_init(self):
        """A set built from an iterable reports the number of elements."""
        hs = HashSet(['1', '2', '3'])
        assert hs.size == 3

    def test_size(self):
        """size counts all distinct elements."""
        hs = HashSet(['A', 'B', 'C', '1', '2'])
        assert hs.size == 5

    def test_contains(self):
        """contains is True for stored elements and False otherwise."""
        hs = HashSet(['K', 'Q', 'J', 'O'])
        assert hs.contains('K') is True
        assert hs.contains('J') is True
        assert hs.contains('Q') is True
        assert hs.contains('C') is False
        assert hs.contains('W') is False

    def test_add(self):
        """add inserts new elements; re-adding an element raises KeyError."""
        hs = HashSet(['R', 'V'])
        hs.add('M')
        hs.add('T')
        with self.assertRaises(KeyError):
            hs.add('M')  # Element already exists
        with self.assertRaises(KeyError):
            hs.add('V')  # Element already exists
        assert hs.size == 4
        assert hs.contains('R') is True
        assert hs.contains('T') is True

    def test_remove(self):
        """remove deletes elements; removing a missing element raises KeyError."""
        hs = HashSet(['Z', 'I', 'Q', '4', 'S'])
        with self.assertRaises(KeyError):
            hs.remove('A')  # Element does not exist
        with self.assertRaises(KeyError):
            hs.remove('X')  # Element does not exist
        hs.remove('Z')
        hs.remove('4')
        assert hs.contains('Z') is False
        assert hs.contains('4') is False
        with self.assertRaises(KeyError):
            hs.remove('Z')  # Element does not exist anymore

    def test_union(self):
        """Union of two sets contains every element of both."""
        set1 = HashSet(['W', 'X', 'Y', 'Z'])
        set2 = HashSet(['A', 'B', 'D', 'E', 'G', 'I'])
        set3 = HashSet(['C', 'V', 'M', 'N'])
        # Item order does not matter
        self.assertCountEqual(set1.union(set2).hash.values(),
                              ['A', 'B', 'D', 'E', 'G', 'I', 'W', 'X', 'Y', 'Z'])
        self.assertCountEqual(set1.union(set3).hash.values(),
                              ['C', 'M', 'N', 'V', 'W', 'X', 'Y', 'Z'])

    def test_intersection(self):
        """Intersection keeps only elements present in both sets."""
        set1 = HashSet(['2', 'A', 'B', 'C'])
        set2 = HashSet(['0', 'A', 'C', 'E', 'X', '2'])
        set3 = HashSet(['B', 'J', 'L', 'K', 'C', '2'])
        # Item order does not matter
        self.assertCountEqual(set1.intersection(set2).hash.values(), ['A', 'C', '2'])
        self.assertCountEqual(set1.intersection(set3).hash.values(), ['B', 'C', '2'])

    def test_difference(self):
        """Difference keeps elements of the left set absent from the right."""
        set1 = HashSet(['5', '6', '8', '10', '1'])
        set2 = HashSet(['1', '5', '6', '7', '2', '9'])
        set3 = HashSet(['2', '4', '6', '9', '10'])
        # Item order does not matter
        self.assertCountEqual(set1.difference(set2).hash.values(), ['8', '10'])
        self.assertCountEqual(set1.difference(set3).hash.values(), ['5', '8', '1'])

    def test_is_subset(self):
        """is_subset is True only when all elements appear in the other set."""
        set1 = HashSet(['A', 'G', 'R'])
        set2 = HashSet(['G', 'D', 'R', 'P', 'A', 'W'])
        set3 = HashSet(['I', 'O', 'S', 'K', 'M', 'Z'])
        assert set1.is_subset(set2) is True
        assert set1.is_subset(set3) is False
        assert set2.is_subset(set3) is False
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.908311
|
# Element already exists # Element already exists # Element doesn not exist # Element does not exist # Element does not exist anymore # Item order does not matter # Item order does not matter # Item order does not matter # Item order does not matter # Item order does not matter # Item order does not matter
| 3.803358
| 4
|
ddot/Ontology.py
|
antonkratz/ddot
| 0
|
6625871
|
from __future__ import absolute_import, print_function, division
import itertools, multiprocessing, logging, os, collections, random, math, sys, time
from itertools import groupby, combinations
from operator import *
from collections import Counter
import tempfile
from subprocess import Popen, PIPE, STDOUT
import inspect
import shlex
import shutil
import io
from io import StringIO
import json
import datetime
import numpy as np
import pandas as pd
import pandas.io.pickle
import networkx as nx
import igraph
import scipy, scipy.sparse
from scipy.sparse import csr_matrix, coo_matrix
from scipy.stats import hypergeom
import ndex.client as nc
from ndex.networkn import NdexGraph
import ndex.beta.layouts as layouts
import ddot
import ddot.config
from ddot.utils import time_print, set_node_attributes_from_pandas, set_edge_attributes_from_pandas, nx_to_NdexGraph, NdexGraph_to_nx, parse_ndex_uuid, parse_ndex_server, make_index, update_nx_with_alignment, bubble_layout_nx, split_indices_chunk, invert_dict, make_network_public, nx_edges_to_pandas, nx_nodes_to_pandas, ig_edges_to_pandas, ig_nodes_to_pandas, melt_square, nx_set_tree_edges, gridify
def _collapse_node(g,
                   v,
                   edge_filter=None,
                   use_v_name=False,
                   combine_attrs=None,
                   default_attr=None,
                   verbose=True,
                   fast_collapse=False,
                   delete=True):
    """Collapses a node in a Graph (igraph package) while preserving
    long-range hierarchical relations between descendants and
    ancestral nodes.

    Every (child -> v, v -> parent) edge pair is bridged with a direct
    (child -> parent) edge before v is (optionally) deleted.

    Parameters
    ----------
    g : igraph.Graph
        Directed graph; modified in place.
    v : int or str
        Vertex index, or the vertex 'name' attribute if use_v_name is True.
    edge_filter : callable, optional
        Predicate over edges; only in/out edges passing it are bridged.
        Ignored when fast_collapse is True.
    use_v_name : bool
        Interpret v as a vertex name instead of an index.
    combine_attrs : dict, optional
        Maps an edge attribute name to a function f(e_in, e_out, e) that
        computes the attribute of the new bridging edge.
    default_attr :
        Unused; kept for interface compatibility.
    verbose : bool
        Print debug output for the 'triangle_edge_priority' attribute.
    fast_collapse : bool
        Add all bridging edges in one batch, ignoring edge attributes.
    delete : bool
        Delete v from the graph after bridging.

    Returns
    -------
    igraph.Graph
        The same graph object, mutated.
    """
    if use_v_name:
        assert isinstance(v, str)
        v = g.vs.find(name_eq=v).index
    try:
        g.vs[v]
    except Exception:
        # Narrowed from a bare `except:` so that KeyboardInterrupt and
        # SystemExit are not swallowed.
        raise Exception("Can't find vertex %s in graph. Consider setting use_v_name=True" % v)
    if fast_collapse:
        parents = g.neighbors(v, mode='out')
        children = g.neighbors(v, mode='in')
        if len(parents) > 0 and len(children) > 0:
            # A faster collapse that adds all new edges
            # simultaneously. Ignores edge attributes
            new_edges = [(c, p) for p in parents for c in children]
            # get_eids returns -1 for missing edges: keep only those
            new_edges = [x for x, y in zip(new_edges, g.get_eids(new_edges, error=False)) if y == -1]
            g.add_edges(new_edges)
    else:
        g.es['collapsed_length'] = 0
        g.es['collapsed_terms'] = [[] for x in g.es]
        in_edges = g.es[g.incident(v, mode='in')]
        out_edges = g.es[g.incident(v, mode='out')]
        if edge_filter is not None:
            in_edges = [e for e in in_edges if edge_filter(e)]
            out_edges = [e for e in out_edges if edge_filter(e)]
        for e_in in in_edges:
            for e_out in out_edges:
                in_neigh, out_neigh = e_in.source, e_out.target
                # Only add an edge if it doesn't already exist
                if g[in_neigh, out_neigh] == 0:
                    g.add_edge(in_neigh, out_neigh)
                    e = g.es[g.get_eid(in_neigh, out_neigh)]
                    if combine_attrs is not None:
                        # Initialize edge attributes to None (comment
                        # previously claimed 0, but the code sets None)
                        for key in combine_attrs:
                            e[key] = None
                e = g.es[g.get_eid(in_neigh, out_neigh)]
                # Update attributes on the bridging edge
                if combine_attrs is not None:
                    for key in combine_attrs:
                        e[key] = combine_attrs[key](e_in, e_out, e)
                        if verbose and key=='triangle_edge_priority':
                            print('Setting',
                                  key,
                                  g.vs[in_neigh]['name'],
                                  g.vs[out_neigh]['name'],
                                  'to',
                                  combine_attrs[key](e_in, e_out, e),
                                  (e_in[key], e_out[key]))
                # Record the path length and intermediate terms collapsed away
                e['collapsed_length'] = e_in['collapsed_length'] + e_out['collapsed_length']
                e['collapsed_terms'] = e_in['collapsed_terms'] + [g.vs[v]['name']] + e_out['collapsed_terms']
    if delete:
        g.delete_vertices(v)
    return g
def read_alignment_file(f, source='Term_1'):
    """Parses an alignment file created from alignOntology's calculateFDRs script

    Parameters
    -----------
    f : str
        Filename of alignment file
    source : str
        Indicates which ontology will be the index of the
        returned pandas.DataFrame. Value must be either 'Term_1' (first
        ontology) or 'Term_2' (second ontology)

    Returns
    --------
    : pandas.DataFrame
        DataFrame with four columns: 'Term', 'Similarity', 'FDR', and 'Size'.
        The index of the DataFrame are the names of terms in the "source" ontology.
    """
    # Input is tab-separated with five columns:
    # term of the first ontology, term of the second ontology,
    # similarity value, FDR, and the term size in the first ontology.
    column_names = ['Term_1', 'Term_2', 'Similarity', 'FDR', 'Size']
    column_types = {'Term_1': str,
                    'Term_2': str,
                    'Similarity': np.float64,
                    'FDR': np.float64,
                    'Size': np.int64}
    alignment = pd.read_csv(f, sep='\t', header=None,
                            names=column_names, dtype=column_types)
    # The non-source ontology's terms become the 'Term' column; the
    # source ontology's terms become the index (also labelled 'Term').
    other = 'Term_2' if source == 'Term_1' else 'Term_1'
    alignment = alignment.rename(columns={other: 'Term'}).set_index(source)
    alignment.index.rename('Term', inplace=True)
    return alignment
def align_hierarchies(hier1,
                      hier2,
                      iterations,
                      threads,
                      update_hier1=False,
                      update_hier2=False,
                      calculateFDRs=None,
                      mutual_collapse=True,
                      output=None,
                      verbose=False):
    """Align two hierarchies by running alignOntology's `calculateFDRs`
    external program, returning the alignment as a DataFrame.

    Parameters
    ----------
    hier1, hier2 : Ontology or str
        Ontologies to align; Ontology objects or paths to CLIXO-format files.
    iterations : int
        Iterations parameter passed to calculateFDRs.
    threads : int or None
        Worker count for calculateFDRs; defaults to the CPU count.
    update_hier1, update_hier2 : bool or iterable
        If truthy, write alignment results (prefixed 'Aligned_') into the
        node attributes of the corresponding original ontology; an iterable
        selects which node-attribute columns of the *other* ontology to copy.
    calculateFDRs : str, optional
        Path to the calculateFDRs binary; defaults to the copy bundled
        inside the ddot package directory.
    mutual_collapse : bool
        Collapse both ontologies onto their common genes first.
    output : str, optional
        Path to copy the raw alignment file to; a temp file by default.

    Returns
    -------
    pandas.DataFrame
        Indexed by hier1 terms with columns 'Term' (aligned hier2 term),
        'Similarity' and 'FDR'. Empty when the gene sets don't overlap.
    """
    # No explicit output path: allocate a temp file and recurse once.
    if output is None:
        with tempfile.NamedTemporaryFile('w', delete=True) as output_file:
            return align_hierarchies(hier1, hier2, iterations, threads,
                                     update_hier1=update_hier1, update_hier2=update_hier2,
                                     mutual_collapse=mutual_collapse,
                                     output=output_file.name,
                                     calculateFDRs=calculateFDRs,
                                     verbose=verbose)
    common_genes = set(hier1.genes) & set(hier2.genes)
    # Keep references to the originals: attribute updates go to these,
    # while hier1/hier2 are rebound to collapsed/serialized versions below.
    hier1_orig, hier2_orig = hier1, hier2
    if len(common_genes) > 0:
        if mutual_collapse:
            hier1, hier2 = Ontology.mutual_collapse(hier1, hier2, verbose=verbose)
        # Strip attributes and propagate annotations before serializing,
        # so the external tool sees plain, fully-propagated hierarchies.
        hier1.clear_node_attr()
        hier1.clear_edge_attr()
        hier2.clear_node_attr()
        hier2.clear_edge_attr()
        hier1.propagate('reverse', inplace=True)
        hier2.propagate('reverse', inplace=True)
        def to_file(hier):
            # Serialize an Ontology to a CLIXO-format temp file; pass
            # through values that are already filenames.
            if isinstance(hier, Ontology):
                with tempfile.NamedTemporaryFile('w', delete=False) as f:
                    hier.to_table(f, clixo_format=True)
                hier = f.name
            else:
                # NOTE(review): `file` is a Python 2-only builtin; on
                # Python 3 this assert raises NameError for non-Ontology,
                # non-path inputs -- confirm intended support.
                assert isinstance(hier, file) or os.path.exists(hier)
            return hier
        hier1 = to_file(hier1)
        hier2 = to_file(hier2)
        if calculateFDRs is None:
            # Default to the calculateFDRs binary bundled with ddot.
            top_level = os.path.dirname(os.path.abspath(inspect.getfile(ddot)))
            calculateFDRs = os.path.join(top_level, 'alignOntology', 'calculateFDRs')
        assert os.path.isfile(calculateFDRs)
        if threads is None:
            import multiprocessing
            threads = multiprocessing.cpu_count()
        output_dir = tempfile.mkdtemp(prefix='tmp')
        cmd = '{5} {0} {1} 0.05 criss_cross {2} {3} {4} gene'.format(
            hier1, hier2, output_dir, iterations, threads, calculateFDRs)
        print('Alignment command:', cmd)
        p = Popen(shlex.split(cmd), shell=False)
        try:
            p.wait()
            shutil.copy(os.path.join(output_dir, 'alignments_FDR_0.1_t_0.1'), output)
        finally:
            # Always clean up the scratch directory and the child process.
            if os.path.isdir(output_dir):
                shutil.rmtree(output_dir)
            if p.poll() is None:
                if verbose: time_print('Killing alignment process %s. Output: %s' % (p.pid, output))
                p.kill()  # Kill the process
        align1 = read_alignment_file(output)[['Term', 'Similarity', 'FDR']]
    else:
        # Disjoint gene sets: nothing to align.
        align1 = pd.DataFrame(columns=['Term', 'Similarity', 'FDR'])
    # align2 is the same table viewed from hier2's side (index and 'Term'
    # column swapped).
    align2 = align1.copy()
    align2.index, align2['Term'] = align2['Term'].values.copy(), align2.index.values.copy()
    append_prefix = lambda x: 'Aligned_%s' % x
    if update_hier1:
        if hasattr(update_hier1, '__iter__'):
            node_attr = hier2_orig.node_attr[update_hier1]
        else:
            node_attr = hier2_orig.node_attr
        hier2_import = pd.merge(pd.DataFrame(index=align2.index), node_attr, left_index=True, right_index=True, how='left')
        assert (hier2_import.index == align2.index).all()
        # Change index to terms in hier1
        hier2_import.index = align2['Term'].copy()
        hier2_import.rename(columns=append_prefix, inplace=True)
    if update_hier2:
        if hasattr(update_hier2, '__iter__'):
            node_attr = hier1_orig.node_attr[update_hier2]
        else:
            node_attr = hier1_orig.node_attr
        hier1_import = pd.merge(pd.DataFrame(index=align1.index), node_attr, left_index=True, right_index=True, how='left')
        assert (hier1_import.index == align1.index).all()
        # Change index to terms in hier2
        hier1_import.index = align1['Term'].copy()
        hier1_import.rename(columns=append_prefix, inplace=True)
    if update_hier1:
        hier1_orig.update_node_attr(align1.rename(columns=append_prefix))
        hier1_orig.update_node_attr(hier2_import)
    if update_hier2:
        hier2_orig.update_node_attr(align2.rename(columns=append_prefix))
        hier2_orig.update_node_attr(hier1_import)
    return align1
def parse_obo(obo,
              output_file=None,
              id2name_file=None,
              id2namespace_file=None,
              alt_id_file=None):
    """Parses an OBO file and writes the results into several tables.

    Fix: the final [Term] stanza is now flushed after the loop. Previously
    its edges were recorded only if another stanza header followed it, so
    a file ending in a [Term] stanza silently lost that stanza's edges.

    Parameters
    ----------
    obo : str
        Filename of OBO file
    output_file : str
        Filename to write table that describes the ontology's
        hierarchical structure. The table has four columns: (1) parent
        term, (2) child term, (3) relation type (e.g. "is_a" or
        "part_of"), (4) namespace of relation
        (e.g. "biological_process" or "cellular component")
    id2name_file : str
        Filename to write table of term descriptions. The table has
        two columns: (1) Ontology term (e.g. "GO:0000030"), (2)
        description (e.g. "mannosyltransferase activity")
    id2namespace_file : str
        Filename to write table of term namespaces. The table has two
        columns: (1) Ontology term (e.g. "GO:0000030"), (2) namespace
        of the term (e.g. "biological_process")
    alt_id_file : str
        Filename to write table of alternative Term IDs that are
        synonyms and refer to the same term. The table has two
        columns: (1) Primary Term ID, (2) Alternative Term ID
    """
    ## Keywords that screw up parsing:
    # import, is_anonymous, intersection_of, union_of
    ## Relations
    # 'is_a:'
    # 'relationship: has_part'   # Not in filtered GO
    # 'relationship: occurs_in'  # Not in filtered GO
    # 'relationship: part_of'
    # 'relationship: positively_regulates'
    # 'relationship: negatively_regulates'
    # 'relationship: regulates'
    # 'relationship: results_in' # Not in filtered GO
    stanza, edges = [], []
    id2name = dict()
    id2namespace = dict()
    alt_id = dict()
    in_term_stanza = False
    default_namespace_exists = False
    for line in io.open(obo).read().splitlines():
        line = line.split('!')[0].strip()  # Remove comments
        if len(line)>0 and line[0]=='[' and line[-1]==']':
            # A new stanza header: flush the previous stanza's edges if it
            # was a [Term] stanza (tagging them with its namespace).
            if in_term_stanza:
                edges.extend(x+(namespace, ) for x in stanza)
            # Start new term stanza
            stanza = []
            # Set the default namespace, if it exists
            if default_namespace_exists:
                namespace = default_namespace
            # In a term stanza or not
            in_term_stanza = line =='[Term]'
            name = None
        if 'id:' == line[:3]:
            curr_term = line.split('id:')[1].strip()
        elif 'alt_id:' in line:
            alt_term = line.split('alt_id:')[1].strip()
            if curr_term in alt_id: alt_id[curr_term].append(alt_term)
            else: alt_id[curr_term] = [alt_term]
            # NOTE(review): `name` is whatever was parsed so far; if
            # alt_id precedes name in a stanza this records None.
            id2name[alt_term] = name
        elif 'name:' in line:
            name = line.split('name:')[1].strip()
            assert not curr_term in id2name
            id2name[curr_term] = name
        elif 'is_a:' in line:
            parent = line.split('is_a:')[1].strip()
            stanza.append((parent, curr_term, 'is_a'))
        elif 'relationship:' in line:
            # e.g. "relationship: part_of GO:0000123"
            line = line.split('relationship:')[1].strip().split()
            if len(line)!=2: print(line)
            assert len(line)==2
            relation, parent = line
            stanza.append((parent, curr_term, relation))
        elif 'namespace:' == line[:10]:
            namespace = line.split('namespace:')[1].strip()
            assert not curr_term in id2namespace
            id2namespace[curr_term] = namespace
        elif 'default-namespace:' == line[:18]:
            namespace = line.split('default-namespace:')[1].strip()
            default_namespace_exists = True
            default_namespace = namespace
    # Flush the trailing stanza (bug fix: previously dropped when the file
    # ended with a [Term] stanza).
    if in_term_stanza:
        edges.extend(x+(namespace, ) for x in stanza)
    pd.DataFrame(edges).to_csv(output_file, header=False, index=False, sep='\t')
    pd.Series(id2name).to_csv(id2name_file, sep='\t')
    pd.Series(id2namespace).to_csv(id2namespace_file, sep='\t')
    pd.Series(dict([(a, c) for a, b in alt_id.items() for c in b])).to_csv(alt_id_file, sep='\t')
def parse_gaf(gaf):
    """
    Read gene-term annotations from GAF file format:

    http://geneontology.org/page/go-annotation-file-gaf-format-21

    Parameters
    ----------
    gaf : str
        Filename of GAF file

    Returns
    --------
    pandas.DataFrame of the annotations, with NOT-qualified rows removed.
    """
    # The 17 columns defined by the GAF 2.1 specification.
    gaf_columns = ['DB', 'DB Object ID', 'DB Object Symbol',
                   'Qualifier', 'GO ID', 'DB:Reference',
                   'Evidence Code', 'With (or) From', 'Aspect',
                   'DB Object Name', 'DB Object Synonym',
                   'DB Object Type', 'Taxon', 'Date',
                   'Assigned By', 'Annotation Extension',
                   'Gene Product Form ID']
    annotations = pd.read_csv(gaf, sep='\t', header=None,
                              comment='!', names=gaf_columns)
    # Drop negative annotations (rows with a NOT qualifier); keep the rest.
    keep = annotations['Qualifier'] != 'NOT'
    return annotations.loc[keep, :]
class Ontology(object):
"""A Python representation for constructing, analyzing, and
manipulating the hierarchical structure of ontologies.
An Ontology object contains the following attributes for
representing the hierarchical structure. Do not directly modify
these attributes.
Parameters
----------
genes : list
Names of genes
terms : list
Names of terms
gene_2_term : dict
gene_2_term[<gene>] --> list of terms connected to
<gene>. Terms are represented as their 0-based index in
self.terms.
term_2_gene : dict
term_2_gene[<term>] --> list of genes connected to
<term>. Genes are represented as their 0-based index in
self.genes.
child_2_parent : dict
child_2_parent[<child>] --> list of the parent terms of <child>
parent_2_child : dict
parent_2_child[<parent>] --> list of the children terms of <parent>
term_sizes : list
A list of every term's size, i.e. the number of unique genes
that it and its descendant terms contain. This list has the
same order as self.terms. It holds that for every i,
`term_sizes[i] = len(self.term_2_gene[self.terms[i]])`
"""
NODETYPE_ATTR = 'NodeType'
GENE_NODETYPE = 'Gene'
TERM_NODETYPE = 'Term'
EDGETYPE_ATTR = 'EdgeType'
GENE_TERM_EDGETYPE = 'Gene-Term'
CHILD_PARENT_EDGETYPE = 'Child-Parent'
def __init__(self,
             hierarchy,
             mapping,
             edge_attr=None,
             node_attr=None,
             parent_child=False,
             add_root_name=None,
             propagate=None,
             ignore_orphan_terms=False,
             verbose=True,
             **kwargs):
    """Construct an Ontology object.

    Parameters
    ----------
    hierarchy : list, tuple
        Iterable of (child term, parent term). E.g. list of 2-tuples
    mapping : list, tuple
        Iterable of (gene, term) pairs. E.g. list of 2-tuples
    edge_attr : pandas.DataFrame
        Meta-data describing (child_term, parent_term)
        pairs. Suggestion: The index of the DataFrame must be a
        pandas.MultiIndex, where the first level is the child term
        and the second level is the parent term.
    node_attr : pandas.DataFrame
        Meta-data describing genes/terms; index holds node names.
    parent_child : bool
        If True, then the definitions of <hierarchy> and <mapping>
        are reversed so that they iterate over (parent term, child
        term) and (term, gene) pairs.
    propagate : None, str
        The direction ('forward' or 'reverse') to propagate
        gene-term annotations up the hierarchy with
        Ontology.propagate(). If None, then don't propagate annotations.
    add_root_name : str
        The name of an artificial root. If there are multiple
        roots in the ontology, then they are joined into one root
        with this name. Default: Don't create this root.
    ignore_orphan_terms : bool
        Drop terms that are annotated by genes but not connected
        to any other term.
    """
    # Internal escape hatch: Ontology(..., empty=True) skips construction
    # so that other code can populate the fields manually.
    if 'empty' in kwargs and kwargs['empty'] is True:
        return
    if parent_child:
        hierarchy = [(x[1],x[0]) for x in hierarchy]
        mapping = [(x[1],x[0]) for x in mapping]
    # Cast all node names to strings
    hierarchy = [(str(x[0]),str(x[1])) for x in hierarchy]
    mapping = [(str(x[0]),str(x[1])) for x in mapping]
    ## Read term-to-term edges
    # parent_2_child[<term_name>] --> list of <term_name>'s children terms
    self.parent_2_child = {r: [p[0] for p in q] for r, q in \
                           itertools.groupby(sorted(hierarchy,
                                                    key=lambda a:a[1]),
                                             key=lambda a:a[1])}
    ## Read gene-to-term edges
    # self.gene_2_term[<gene_name>] --> set of terms that <gene_name> is mapped to
    self.gene_2_term = {key: set([a[1] for a in group]) for key, group in \
                        itertools.groupby(sorted(mapping,
                                                 key=lambda a:a[0]),
                                          key=lambda a:a[0])}
    ## Check that the set of terms is the same according to
    ## parent_2_child and self.gene_2_term
    # terms_A: terms mentioned in the term hierarchy
    terms_A = set.union(set(self.parent_2_child.keys()),
                        *[set(x) for x in self.parent_2_child.values()])
    # terms_B: terms mentioned in gene annotations
    if len(self.gene_2_term) > 0:
        terms_B = set.union(*self.gene_2_term.values())
    else:
        terms_B = set([])
    if verbose and ignore_orphan_terms and len(terms_B - terms_A)>0:
        print('WARNING: Ignoring {} terms are connected to genes but not to other terms'.format(len(terms_B - terms_A)))
    if ignore_orphan_terms:
        self.terms = sorted(terms_A)
    else:
        self.terms = sorted(terms_A | terms_B)
    self.genes = sorted(self.gene_2_term.keys())
    if add_root_name is not None:
        # Join multiple roots under a single artificial super-root.
        root_list = self.get_roots()
        if len(root_list) > 1:
            print('Unifying %s roots into one super-root' % len(root_list))
            self.parent_2_child[add_root_name] = root_list
            self.terms.append(add_root_name)
    ## terms_index[<term_name>] --> index in self.terms
    self.terms_index = make_index(self.terms)
    ## self.genes_index[<gene_name>] --> index in self.genes
    self.genes_index = make_index(self.genes)
    ## Convert self.gene_2_term to list of term indices rather than term names
    for k, v in self.gene_2_term.items():
        self.gene_2_term[k] = [self.terms_index[x] for x in self.gene_2_term[k] if x in self.terms_index]
    if node_attr is None:
        self.clear_node_attr()
    else:
        assert node_attr.index.nlevels == 1
        if node_attr.index.name != 'Node':
            node_attr.index.name = 'Node'
        self.node_attr = node_attr
    if edge_attr is None:
        self.clear_edge_attr()
    else:
        # Edge attributes are indexed by a (Child, Parent) MultiIndex.
        assert edge_attr.index.nlevels == 2
        edge_attr.index.names = ['Child', 'Parent']
        self.edge_attr = edge_attr
    self._update_fields()
    if propagate:
        # Propagate annotations, then re-derive the cached mappings.
        self.propagate(direction=propagate, inplace=True)
        self._update_fields()
    self._check_valid()
def _update_fields(self, reset_term_sizes=True):
    """Re-derive cached mappings after the hierarchy has been modified.

    Rebuilds child_2_parent and term_2_gene from the primary fields and
    guarantees that every term has a (possibly empty) entry in both the
    parent and child maps. When reset_term_sizes is True, the cached
    term sizes are invalidated so they will be recomputed lazily.
    """
    self.child_2_parent = self._get_child_2_parent()
    self.term_2_gene = self._get_term_2_gene()
    if reset_term_sizes:
        self._term_sizes = None
    for term in self.terms:
        self.parent_2_child.setdefault(term, [])
        self.child_2_parent.setdefault(term, [])
def add_root(self, root_name, inplace=False):
    """Check if there is a single unifying root term of the ontology. If
    not, then identify the multiple roots and join them under an
    artificial root named `root_name`.

    Parameters
    ----------
    root_name : str
        Name of the artificial root; must not already be a term.
    inplace : bool
        Modify this Ontology in place instead of a copy.

    Returns
    -------
    Ontology
        Self (if inplace) or the modified copy.
    """
    if inplace:
        ont = self
    else:
        ont = self.copy()
    assert root_name not in ont.terms
    root_list = ont.get_roots()
    if len(root_list) > 1:
        print('Unifying %s roots into one super-root' % len(root_list))
        ont.parent_2_child[root_name] = root_list
        ont.terms.append(root_name)
        # Rebuild the term index for the new sorted term order, then remap
        # the gene annotations from old indices to new indices.
        ont.terms_index = make_index(sorted(ont.terms))
        for g, t_list in ont.gene_2_term.items():
            ont.gene_2_term[g] = [ont.terms_index[ont.terms[t]] for t in t_list]
        ont.terms.sort()
        ont._update_fields()
    return ont
def _get_child_2_parent(self):
    """Invert self.parent_2_child into a child -> [parents] mapping.

    Every term appears as a key, including terms with no parents
    (mapped to an empty list).
    """
    pairs = [(child, parent)
             for parent, children in self.parent_2_child.items()
             for child in children]
    # Stable sort on the child only, so parents keep insertion order.
    pairs.sort(key=lambda cp: cp[0])
    child_2_parent = {
        child: [cp[1] for cp in grouped]
        for child, grouped in itertools.groupby(pairs, key=lambda cp: cp[0])
    }
    for term in self.terms:
        child_2_parent.setdefault(term, [])
    return child_2_parent
def clear_node_attr(self):
    """Reset node attributes to an empty table whose index is named 'Node'."""
    empty = pd.DataFrame()
    empty.index.name = 'Node'
    self.node_attr = empty
def clear_edge_attr(self):
    """Resets the edge attributes to be empty.

    The empty DataFrame keeps a 2-level (Child, Parent) MultiIndex so
    later reindex/union operations preserve the edge index shape.
    """
    self.edge_attr = pd.DataFrame()
    # NOTE: `codes=` requires pandas >= 0.24 (older versions used `labels=`).
    self.edge_attr.index = pd.MultiIndex(levels=[[],[]],
                                         codes=[[],[]],
                                         names=['Child', 'Parent'])
def update_node_attr(self, node_attr):
    """Update existing node attributes or add new node attributes.

    Parameters
    ----------
    node_attr : pandas.DataFrame
        Dataframe where index are the names of genes or terms and
        where the columns are the names of node attributes. Rows whose
        index is not a gene or term of this ontology are ignored.
    """
    # TODO : make sure that renaming/deleting/collapsing of genes and
    # columns respect the node_attr and edge_attr
    # Filter for genes and terms in the ontology
    nodes = set(self.genes) | set(self.terms)
    node_attr = node_attr.loc[[x for x in node_attr.index if x in nodes], :]
    assert node_attr.index.duplicated().sum() == 0
    # Update index to the union of current and new node_attr
    self.node_attr = self.node_attr.reindex(self.node_attr.index.union(node_attr.index))
    # Update columns (overwrites overlapping cells, adds new columns)
    for col in node_attr.columns:
        self.node_attr.loc[node_attr.index, col] = node_attr[col]
def update_edge_attr(self, edge_attr):
    """Update existing edge attributes or add new edge attributes.

    Parameters
    ----------
    edge_attr : pandas.DataFrame
        Dataframe where the index is a MultiIndex representing edges
        in the Ontology, such that the first level is the name of
        a gene or child term, and the second level is the name of
        a parent term. Columns are the names of edge attributes. Rows
        whose index is not an edge of this ontology are ignored.
    """
    # Collect the set of valid (child, parent) and (gene, term) edges.
    edges = []
    for child, parent_list in self.child_2_parent.items():
        for parent in parent_list:
            edges.append((child, parent))
    for gene, term_list in self.gene_2_term.items():
        for term in term_list:
            # gene_2_term stores term *indices*; translate back to names.
            edges.append((gene, self.terms[term]))
    edges = set(edges)
    # Filter for edges that exist in the ontology
    edge_attr = edge_attr.loc[[x for x in edge_attr.index if x in edges], :]
    assert edge_attr.index.duplicated().sum() == 0
    # Update index to the union of current and new edge_attr
    self.edge_attr = self.edge_attr.reindex(self.edge_attr.index.union(edge_attr.index))
    # Update values for overlapping columns
    for col in edge_attr.columns:
        self.edge_attr.loc[edge_attr.index, col] = edge_attr[col].values
def get_roots(self):
    """Return the sorted list of root terms (terms with no parent).

    Returns
    -------
    : list
    """
    has_parent = set()
    for children in self.parent_2_child.values():
        has_parent.update(children)
    return sorted(set(self.terms) - has_parent)
def _make_dummy(self, tree_edges=None):
    """For each term T in the ontology, create a new dummy term that
    indirectly connects T's genes to T. For example, if g1 and g2 are in
    T, then a new term dummy_T is created so that the new ontology
    consists of

    g1 --> T_dummy
    g2 --> T_dummy
    T_dummy --> T

    Parameters
    ----------
    tree_edges : list
        List of (child, parent) edges that constitute a spanning
        tree of the ontology. If specified, then for each term T,
        only the genes that are connected to T in the spanning
        tree will be re-routed to the dummy node.
        Default: None. This restriction will not apply.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    ont = self
    new_gene_2_term = []
    new_child_2_parent = []
    for t in ont.terms:
        used_dummy = False
        # Only terms with children get a distinct dummy; leaf terms keep
        # their direct gene connections.
        if len(ont.parent_2_child[t]) > 0:
            dummy_term = 'dummy2_%s' % t
        else:
            dummy_term = t
        for g in [ont.genes[g] for g in ont.term_2_gene[t]]:
            if (tree_edges is None) or (g,t) in tree_edges:
                new_gene_2_term.append((g, dummy_term))
                used_dummy=True
        if used_dummy and dummy_term != t:
            new_child_2_parent.append([dummy_term, t])
        # Preserve (optionally tree-restricted) term-term edges.
        for p in ont.child_2_parent[t]:
            if (tree_edges is None) or (t,p) in tree_edges:
                new_child_2_parent.append((t, p))
    ont_dummy = Ontology(new_child_2_parent, new_gene_2_term)
    return ont_dummy
def _collect_transform(self,
                       tree_edges=None,
                       hidden_gene=True,
                       hidden_parent=True,
                       hidden_child=True):
    """Creates intermediate "collect" nodes and duplicate ("hidden") copies
    of nodes so that non-tree edges can be displayed without turning the
    tree layout into a general DAG.

    Parameters
    ----------
    tree_edges : set, optional
        (child, parent) edges forming a spanning tree; defaults to
        self.get_tree(). Edges outside this set are rerouted through
        duplicated nodes grouped under per-term collect nodes.
    hidden_gene, hidden_parent, hidden_child : bool
        Whether to duplicate non-tree gene, parent, and child
        connections, respectively.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    ont = self
    if tree_edges is None:
        tree_edges = self.get_tree()
    # Per-node counter used to mint duplicate names "<name>.<k>".
    nodes_copy = {v : 1 for v in ont.genes + ont.terms}
    def get_copy(u):
        # Return a fresh duplicate name for node u and bump its counter.
        u_name = '%s.%s' % (u, nodes_copy[u])
        nodes_copy[u] += 1
        return u_name
    collect_nodes = []
    new_gene_2_term = []
    new_child_2_parent = []
    for t in ont.terms:
        ## Gene-term connections
        collect_hidden_gene = 'collect_hidden_gene_%s' % t
        used_hidden_gene = False
        for g in [ont.genes[g] for g in ont.term_2_gene[t]]:
            if (not hidden_gene) or ((g, t) in tree_edges):
                new_gene_2_term.append((g, collect_hidden_gene))
                used_hidden_gene = True
            else:
                # Non-tree gene edge: attach a duplicate of the gene.
                new_gene_2_term.append((get_copy(g), collect_hidden_gene))
                used_hidden_gene = True
        if used_hidden_gene:
            collect_nodes.append(collect_hidden_gene)
            new_child_2_parent.append((collect_hidden_gene, t))
        ## Parent-child term connections
        collect_hidden_child = 'collect_hidden_child_%s' % t
        collect_hidden_parent = 'collect_hidden_parent_%s' % t
        used_hidden_child, used_hidden_parent = False, False
        for c in ont.parent_2_child[t]:
            if (not hidden_child) or ((c,t) in tree_edges):
                new_child_2_parent.append((c,t))
            else:
                # Non-tree child edge: route through a duplicate child.
                new_child_2_parent.append((get_copy(c), collect_hidden_child))
                used_hidden_child = True
        for p in ont.child_2_parent[t]:
            if hidden_parent and ((t,p) not in tree_edges):
                # Non-tree parent edge: represent the parent as a duplicate.
                new_child_2_parent.append((get_copy(p), collect_hidden_parent))
                used_hidden_parent = True
        if used_hidden_child:
            collect_nodes.append(collect_hidden_child)
            new_child_2_parent.append((collect_hidden_child, t))
        if used_hidden_parent:
            collect_nodes.append(collect_hidden_parent)
            new_child_2_parent.append((collect_hidden_parent, t))
    ont_collect = Ontology(new_child_2_parent,
                           new_gene_2_term,
                           node_attr=ont.node_attr.copy(),
                           edge_attr=ont.edge_attr.copy(),
                           verbose=False)
    ##################################################
    # Set Original_Name and Size for Duplicate Nodes #
    ##################################################
    # (duplicate name, original name) for every minted duplicate.
    new_and_orig = [('%s.%s' %(v,i), v) for v, copy_num in nodes_copy.items()
                    for i in (range(1, copy_num) if copy_num>1 else [])]
    new_2_orig = dict(new_and_orig)
    df = pd.DataFrame({'orig_tmp' : [x[1] for x in new_and_orig],
                       'Hidden' : True},
                      index=[x[0] for x in new_and_orig])
    # NOTE(review): np.str / np.bool were removed in NumPy 1.24; this line
    # raises AttributeError on modern NumPy -- confirm pinned version.
    df = df.astype({'orig_tmp' : np.str, 'Hidden' : np.bool})
    # For duplicate nodes, set the Original_Name attribute to the name of the original node
    merge = pd.merge(df, ont.node_attr, how='left', left_on=['orig_tmp'], right_index=True)
    if 'Original_Name' in merge:
        unset = pd.isnull(merge['Original_Name']).values
        merge.loc[unset, 'Original_Name'] = df.loc[unset, 'orig_tmp'].values
    else:
        merge['Original_Name'] = df['orig_tmp'].values
    del merge['orig_tmp']
    # Set the 'Size' attribute of duplicate nodes to be the 'Size'
    # of the original node. If the original node is a term with no
    # 'Size' attribute, then set 'Size' to be the number of genes
    # in the term
    in_merge = set(merge.index)
    for node in merge.index:
        if node in new_2_orig:
            orig = new_2_orig[node]
            if orig in in_merge and not pd.isnull(merge.loc[orig, 'Size']):
                merge.loc[node, 'Size'] = merge.loc[new_2_orig[node], 'Size']
            elif orig in ont.terms_index:
                merge.loc[node, 'Size'] = ont.term_sizes[ont.terms_index[orig]]
    # Append attributes for the new nodes
    try:
        # Used for pandas version >= 0.23
        ont_collect.node_attr = pd.concat([ont.node_attr, merge], axis=0, sort=True)
    except:
        # Fallback for older pandas without the `sort` keyword.
        ont_collect.node_attr = pd.concat([ont.node_attr, merge], axis=0)
    ########################################
    # Set Label and Size for collect nodes #
    ########################################
    def get_label(x):
        # Human-readable label for each kind of collect node.
        if 'hidden_child' in x:
            return 'Linked Children'
        elif 'hidden_parent' in x:
            return 'Linked Parents'
        elif 'hidden_gene' in x:
            return 'Linked Genes'
        elif 'tree_gene' in x:
            return 'Genes'
    collect_attr = pd.DataFrame(
        {'Size' : 1,
         'Label' : [get_label(x) for x in collect_nodes],
         'is_collect_node' : True},
        index=collect_nodes)
    ont_collect.update_node_attr(collect_attr)
    return ont_collect
def unfold(self,
           duplicate=None,
           genes_only=False,
           levels=None,
           tree_edges=None):
    """Traverses the ontology from the root to the leaves while
    duplicating nodes during the traversal to create a tree representation.

    Traverse the ontology from the root nodes to the leaves in a
    breadth-first manner. Each time a node is traversed, then
    create a duplicate of it. Duplicates are named
    '<original>.<copy number>' and the original name is recorded in the
    'Original_Name' node attribute of the returned Ontology.

    Parameters
    ----------
    duplicate : list
        Nodes to duplicate for unfolding. Default: all genes and terms
    genes_only : bool
        If True, then duplicate all of the genes and none of the terms. Default: False
    levels : int (optional)
        If given, limit expansion around a spanning tree: nodes reached
        through tree edges stay at depth 0, and nodes reached through
        non-tree edges are only expanded while their depth is < levels.
        Depths are recorded in the 'Level' node attribute.
    tree_edges : set of (child, parent) tuples (optional)
        Spanning-tree edges used when `levels` is given. Computed with
        self.get_tree() if not supplied.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Work on a copy with annotations pushed down (reverse propagation)
    ont = self.propagate(direction='reverse', inplace=False)
    # "hidden mode": track how far each duplicated node is from the spanning tree
    hidden_mode = levels is not None
    if hidden_mode:
        if tree_edges is None:
            tree_edges = self.get_tree()
        hidden_depth = {}
    if genes_only:
        duplicate = ont.genes
    elif duplicate is None:
        duplicate = ont.genes + ont.terms
    # Per-node counter used to generate unique duplicate names
    nodes_copy = {x : 0 for x in duplicate}
    def get_name(u):
        # Return '<u>.<k>' for duplicated nodes, or u unchanged otherwise
        if u in nodes_copy:
            u_name = '%s.%s' % (u, nodes_copy[u])
            nodes_copy[u] += 1
        else:
            u_name = u
        return u_name
    to_expand = []
    new_2_orig = {}
    # Seed the traversal with (renamed) root terms
    for u in ont.get_roots():
        u_name = get_name(u)
        new_2_orig[u_name] = u
        to_expand.append(u_name)
        if hidden_mode:
            hidden_depth[u_name] = 0
    expanded = set(to_expand)
    hierarchy, mapping = [], []
    # Manual bfs
    curr = 0
    while curr < len(to_expand):
        v_name = to_expand[curr]
        v = new_2_orig[v_name]
        # Attach (possibly duplicated) genes of v
        for u in [ont.genes[u] for u in ont.term_2_gene[v]]:
            u_name = get_name(u)
            new_2_orig[u_name] = u
            mapping.append((u_name, v_name))
            if hidden_mode:
                v_depth = hidden_depth[v_name]
                if v_depth==0:
                    if (u,v) in tree_edges:
                        hidden_depth[u_name] = 0
                    else:
                        hidden_depth[u_name] = 1
                elif v_depth < levels:
                    hidden_depth[u_name] = v_depth + 1
        # Attach (possibly duplicated) child terms of v
        for u in ont.parent_2_child[v]:
            u_name = get_name(u)
            new_2_orig[u_name] = u
            hierarchy.append((u_name, v_name))
            if hidden_mode:
                v_depth = hidden_depth[v_name]
                insert = u_name not in expanded
                if v_depth==0 and ((u,v) in tree_edges):
                    hidden_depth[u_name] = 0
                elif v_depth < levels:
                    hidden_depth[u_name] = v_depth + 1
                else:
                    # Past the level limit: do not expand this child further
                    insert = False
            else:
                insert = u_name not in expanded
            if insert:
                to_expand.append(u_name)
                expanded.add(u_name)
        curr += 1
    # Carry node attributes over from the original names to the duplicates
    new_nodes, orig_nodes = zip(*new_2_orig.items())
    new_nodes, orig_nodes = list(new_nodes), list(orig_nodes)
    ont.node_attr = ont.node_attr.reindex(list(set(orig_nodes)))
    node_attr = ont.node_attr.loc[orig_nodes, :].copy()
    if 'Original_Name' in node_attr:
        # Only fill in Original_Name where it is not already set
        unset = pd.isnull(node_attr['Original_Name']).values
        node_attr.loc[unset, 'Original_Name'] = np.array(orig_nodes)[unset]
    else:
        node_attr['Original_Name'] = orig_nodes
    if hidden_mode:
        node_attr['Level'] = [hidden_depth[v] for v in new_nodes]
    node_attr.index = new_nodes
    node_attr.dropna(axis=0, how='all', inplace=True)
    # Carry edge attributes over, re-keyed to the duplicated node names
    new_edges = hierarchy + mapping
    old_edges = [(new_2_orig[u], new_2_orig[v]) for u, v in new_edges]
    in_index = [x in ont.edge_attr.index for x in old_edges]
    if sum(in_index) > 0:
        edge_attr = ont.edge_attr.loc[[x for x, y in zip(old_edges, in_index) if y], :].copy()
        edge_attr.index = pd.MultiIndex.from_tuples([x for x, y in zip(new_edges, in_index) if y])
        edge_attr.dropna(axis=0, how='all', inplace=True)
    else:
        edge_attr = None
    ont = Ontology(hierarchy,
                   mapping,
                   edge_attr=edge_attr,
                   node_attr=node_attr,
                   parent_child=False,
                   verbose=False)
    return ont
def _to_networkx_no_layout(self):
    """Convert the Ontology to a NetworkX DiGraph without computing a layout.

    Nodes are all genes and terms, annotated with `self.node_attr` plus
    defaults for NodeType, Size, isRoot and Label. Edges point
    child -> parent (gene -> term and child term -> parent term) and are
    tagged with the EdgeType attribute plus `self.edge_attr`.

    NOTE: uses the networkx 1.x-style ``G.node`` accessor throughout.
    """
    G = nx.DiGraph()
    #################################
    ### Add nodes and node attributes
    G.add_nodes_from(self.genes + self.terms)
    set_node_attributes_from_pandas(G, self.node_attr)
    # Ensure that all 'Size' values are the same numeric type
    # (float if the node_attr column is floating point, else int)
    if 'Size' in self.node_attr.columns:
        dtype = self.node_attr['Size'].dtype
        if dtype in [np.dtype('float16'), np.dtype('float32'), np.dtype('float64')]:
            dtype = float
        else:
            dtype = int
    else:
        dtype = int
    for t in self.terms:
        G.node[t][self.NODETYPE_ATTR] = self.TERM_NODETYPE
        # Default a term's Size to its number of annotated genes
        if ('Size' not in G.node[t]) or pd.isnull(G.node[t]['Size']):
            G.node[t]['Size'] = dtype(self.term_sizes[self.terms_index[t]])
        G.node[t]['isRoot'] = False
    for g in self.genes:
        G.node[g][self.NODETYPE_ATTR] = self.GENE_NODETYPE
        # A gene's default Size is 1
        if ('Size' not in G.node[g]) or pd.isnull(G.node[g]['Size']):
            G.node[g]['Size'] = dtype(1)
        G.node[g]['isRoot'] = False
    # Identify the root (only the first root is flagged)
    root = self.get_roots()[0]
    G.node[root]['isRoot'] = True
    # Set the node attribute 'Label'. If the node has a "Original
    # Name" attribute, indicating that it is a duplicate, then use
    # that. Otherwise, use the node's name.
    for x in self.genes + self.terms:
        data = G.node[x]
        if ('Label' not in data) or pd.isnull(data['Label']):
            if ('Original_Name' in data) and (not pd.isnull(data['Original_Name'])):
                data['Label'] = data['Original_Name']
            else:
                data['Label'] = x
    #################################
    ### Add edges and edge attributes
    G.add_edges_from([(g, self.terms[t],
                       {self.EDGETYPE_ATTR : self.GENE_TERM_EDGETYPE}) \
                      for g in self.genes for t in self.gene_2_term[g]])
    G.add_edges_from([(c, p,
                       {self.EDGETYPE_ATTR : self.CHILD_PARENT_EDGETYPE}) \
                      for p in self.terms for c in self.parent_2_child.get(p, [])])
    set_edge_attributes_from_pandas(G, self.edge_attr)
    return G
def expand(self, spanning_tree=True):
    """Lay out the ontology with the 'bubble-collect' strategy.

    Mirrors the 'bubble-collect' branch of :meth:`to_networkx`, using
    that method's default layout parameters.

    Parameters
    ----------
    spanning_tree : bool or container
        If True, compute a spanning tree of the DAG; otherwise assumed
        to be a pre-computed collection of tree edges.

    Returns
    -------
    : nx.DiGraph
        Graph with node positions stored in ``G.pos``.
    """
    # Bug fix: `layout_params` and `scale` were referenced below but
    # never defined in this method (code copied from to_networkx),
    # raising NameError. Use the same defaults as to_networkx.
    layout_params = {'hidden_parent' : True,
                     'hidden_child' : False,
                     'hidden_gene' : False}
    scale = 1
    if spanning_tree is True:
        ont = self._collect_transform()
    else:
        # Assume a list of tree edges are supplied
        ont = self._collect_transform(spanning_tree)
    G_tree = ont.get_tree(ret='ontology')._to_networkx_no_layout()
    pos = bubble_layout_nx(G_tree)
    tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
    collect_nodes = tmp[tmp['is_collect_node']].index
    gridify(collect_nodes, pos, G_tree)
    ## Remove collector nodes that the layout parameters hide
    def decide_delete(v):
        return ((not layout_params['hidden_parent'] and v=='Linked Parents') or
                (not layout_params['hidden_child'] and v=='Linked Children') or
                (not layout_params['hidden_gene'] and v=='Linked Genes'))
    tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
    tmp = tmp[tmp['is_collect_node']]
    tmp = tmp[tmp['Label'].apply(decide_delete)]
    to_delete = tmp.index.tolist()
    ont_red = ont
    if len(to_delete) > 0:
        # Need fast special delete
        ont_red = ont_red.delete(to_delete=to_delete, preserve_transitivity=True)
    # Set the original term sizes for the original copy of
    # each term (not the duplicates)
    ont_red.update_node_attr(pd.DataFrame({'Size' : self.term_sizes}, index=self.terms))
    G = ont_red._to_networkx_no_layout()
    nodes_set = set(G.nodes())
    G.pos = {n : (float(scale*p[0]), float(scale*p[1])) for n, p in pos.items() if n in nodes_set}
    nx_set_tree_edges(G, ont_red.get_tree())
    ######################################################
    # TODO: move this visual styling outside of the layout
    # functionality
    nx.set_edge_attributes(G, values='ARROW', name='Vis:EDGE_SOURCE_ARROW_SHAPE')
    nx.set_edge_attributes(G, values='NONE', name='Vis:EDGE_TARGET_ARROW_SHAPE')
    for v, data in G.nodes(data=True):
        try:
            # Flip arrow direction on edges into "Linked Parents" collector nodes
            if 'collect_hidden_parent' in v and 'is_collect_node' in data and data['is_collect_node']:
                for _, _, data in G.in_edges(v, data=True):
                    data["Vis:EDGE_TARGET_ARROW_SHAPE"] = 'ARROW'
                    data["Vis:EDGE_SOURCE_ARROW_SHAPE"] = 'NONE'
        except:
            print(data)
            print('v', v)
            print('collect_hidden_parent' in v)
            print('is_collect_node' in data)
            print(data['is_collect_node'])
            raise
    # Bug fix: the laid-out graph was computed but never returned
    return G
def to_networkx(self,
                layout='bubble',
                spanning_tree=True,
                layout_params=None,
                verbose=False):
    """Converts Ontology into a NetworkX object.

    Parameters
    ----------
    layout : str or None
        The name of the layout algorithm for laying out the
        Ontology as a graph: 'bubble' (default) or 'bubble-collect'.
        Node positions are stored in the node attributes
        'x_pos' and 'y_pos'. If None, then do not perform a layout.
    spanning_tree : bool or container
        If True, then identify a spanning tree of the DAG and mark it
        with the edge attribute "Is_Tree_Edge". May also be a
        pre-computed collection of tree edges. If falsy, no layout or
        tree annotation is performed.
    layout_params : dict (optional)
        Overrides for the keys 'hidden_parent', 'hidden_child' and
        'hidden_gene', which control which collector nodes are kept by
        the 'bubble-collect' layout.
    verbose : bool

    Returns
    -------
    : nx.DiGraph
    """
    # Merge user-supplied layout parameters over the defaults
    default_layout_params = {'hidden_parent' : True,
                             'hidden_child' : False,
                             'hidden_gene' : False}
    if layout_params is not None:
        default_layout_params.update(layout_params)
    layout_params = default_layout_params
    if spanning_tree:
        scale = 1
        if layout is None or layout=='bubble':
            G = self._to_networkx_no_layout()
            if spanning_tree is True:
                tree_edges = self.get_tree()
            else:
                # Assume a collection of tree edges was supplied
                tree_edges = spanning_tree
            nx_set_tree_edges(G, tree_edges)
            if layout=='bubble':
                # Lay out a dummy-augmented tree, then drop the dummy nodes
                G_tree = self.propagate('reverse')._make_dummy(tree_edges)._to_networkx_no_layout()
                pos = bubble_layout_nx(G_tree, verbose=verbose)
                gridify([v for v in G_tree.nodes() if 'dummy2' in v], pos, G_tree)
                G.pos = {n : (float(scale*p[0]), float(scale*p[1])) for n, p in pos.items() if 'dummy2' not in n}
        elif layout=='bubble-collect':
            if spanning_tree is True:
                ont = self._collect_transform()
            else:
                # Assume a list of tree edges are supplied
                ont = self._collect_transform(spanning_tree)
            G_tree = ont.get_tree(ret='ontology')._to_networkx_no_layout()
            pos = bubble_layout_nx(G_tree)
            tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
            collect_nodes = tmp[tmp['is_collect_node']].index
            gridify(collect_nodes, pos, G_tree)
            ## Remove collector nodes that the layout parameters hide
            def decide_delete(v):
                return ((not layout_params['hidden_parent'] and v=='Linked Parents') or
                        (not layout_params['hidden_child'] and v=='Linked Children') or
                        (not layout_params['hidden_gene'] and v=='Linked Genes'))
            tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
            tmp = tmp[tmp['is_collect_node']]
            tmp = tmp[tmp['Label'].apply(decide_delete)]
            to_delete = tmp.index.tolist()
            ont_red = ont
            if len(to_delete) > 0:
                # Need fast special delete
                ont_red = ont_red.delete(to_delete=to_delete, preserve_transitivity=True)
            # Set the original term sizes for the original copy of
            # each term (not the duplicates)
            ont_red.update_node_attr(pd.DataFrame({'Size' : self.term_sizes}, index=self.terms))
            G = ont_red._to_networkx_no_layout()
            nodes_set = set(G.nodes())
            G.pos = {n : (float(scale*p[0]), float(scale*p[1])) for n, p in pos.items() if n in nodes_set}
            nx_set_tree_edges(G, ont_red.get_tree())
            ######################################################
            # TODO: move this visual styling outside of the layout
            # functionality
            nx.set_edge_attributes(G, values='ARROW', name='Vis:EDGE_SOURCE_ARROW_SHAPE')
            nx.set_edge_attributes(G, values='NONE', name='Vis:EDGE_TARGET_ARROW_SHAPE')
            for v, data in G.nodes(data=True):
                # if 'collect_hidden' in v and 'is_collect_node' in data and data['is_collect_node']:
                #     for u in G.predecessors(v):
                #         G.node[u]['Vis:Fill Color'] = '#3182BD'
                try:
                    # Flip arrow direction on edges into "Linked Parents" collector nodes
                    if 'collect_hidden_parent' in v and 'is_collect_node' in data and data['is_collect_node']:
                        for _, _, data in G.in_edges(v, data=True):
                            data["Vis:EDGE_TARGET_ARROW_SHAPE"] = 'ARROW'
                            data["Vis:EDGE_SOURCE_ARROW_SHAPE"] = 'NONE'
                except:
                    print(data)
                    print('v', v)
                    print('collect_hidden_parent' in v)
                    print('is_collect_node' in data)
                    print(data['is_collect_node'])
                    raise
        else:
            raise Exception('Unsupported layout: %s', layout)
        if layout is not None:
            # Copy positions into scalar node attributes for serialization
            nx.set_node_attributes(G, values={n : x for n, (x,y) in G.pos.items()}, name='x_pos')
            nx.set_node_attributes(G, values={n : y for n, (x,y) in G.pos.items()}, name='y_pos')
    else:
        G = self._to_networkx_no_layout()
    return G
@classmethod
def from_table(cls,
               table,
               parent=0,
               child=1,
               is_mapping=None,
               mapping=None,
               mapping_parent=0,
               mapping_child=1,
               header=0,
               propagate=False,
               verbose=False,
               clixo_format=False,
               clear_default_attr=True,
               **kwargs):
    """Create Ontology from a tab-delimited table or pandas DataFrame.
    Duplicate gene-term or term-term connections in the table are removed.
    Parameters
    ----------
    table : pandas.DataFrame, file-like object, or filename
        A table that lists (child term, parent term) pairs. If
        mapping==None, then this table should also include (gene,
        term) pairs.
    parent : int or str
        Column for parent terms in table (index or name of column)
    child : int or str
        Column for child terms and genes in table (index or name of column)
    is_mapping : function
        A function that is applied on each row and returns True if
        the row represents a (gene, term) pair and False
        otherwise. This function is only applied when a separate
        table of (gene, term) pairs is not specified,
        i.e. mapping==None.
        The default function is `lambda row: row[2]=={0}`
        which tests if the third column equals the string "{0}".
    mapping : pandas.DataFrame, file-like object, or filename (optional)
        A separate table listing only (gene, term) pairs
    mapping_parent : int or str
        Column for terms in mapping table (index or name of column)
    mappping_child : int or str
        Column for genes in mapping table (index or name of column)
    header : int or None
        Row number to use as the column names, which are then
        stored in the resulting Ontology object's `edge_attr`
        field. For example if `header=0` (default), then the first
        row is assumed to be column names. If `header=None`, then
        no column names are assumed.
    propagate : None or str
        The direction ('forward' or 'reverse') for propagating
        gene-term annotations up the hierarchy with
        Ontology.propagate(). If None, then don't
        propagate annotations.
    clixo_format : bool
        If True, The table is assumed to be in the same format
        produced by the CLIXO C++ implementation. In particular,
        table has three columns:
        Column 1) Parent Term
        Column 2) Child Term or Gene
        Column 3) The string "gene" if the row is a
        gene-term mapping, otherwise the string "default".
        The table is also assumed to have no column headers (i.e. header=False)
    clear_default_attr: bool
        If True (default), then remove the edge attribute
        'EdgeType' created using Ontology.to_table(). This
        attribute was created to make the table be an equivalent
        representation of an Ontology object; however, it is no
        longer necessary after reconstructing the Ontology object.
    Returns
    -------
    : ddot.Ontology.Ontology
    """.format(cls.GENE_TERM_EDGETYPE)
    if clixo_format:
        # Re-enter with CLIXO's fixed column layout, then drop the
        # now-redundant third column from the edge attributes
        ont = cls.from_table(
            table,
            parent=0,
            child=1,
            is_mapping=lambda x: x[2]=='gene',
            header=None,
            clixo_format=False,
            verbose=verbose)
        ont.edge_attr.columns = map(str, ont.edge_attr.columns)
        del ont.edge_attr['2']
        return ont
    if is_mapping is None:
        if mapping is None:
            # print('WARNING: no gene-term connections '
            #       'were specified by the is_mapping '
            #       'function or separate table. '
            #       'Default: assume a gene-term connection when the 3rd column equals %s' % cls.GENE_TERM_EDGETYPE)
            is_mapping = lambda x: x.iloc[2]==cls.GENE_TERM_EDGETYPE
    # Read table (falls through if a DataFrame was passed directly)
    try:
        table = pd.read_table(table, comment='#', header=header)
    except:
        assert isinstance(table, pd.DataFrame)
    # Resolve positional column indices into column names
    if child not in table.columns:
        child = table.columns[child]
    if parent not in table.columns:
        parent = table.columns[parent]
    for col in [child, parent]:
        table.loc[:,col] = table.loc[:,col].astype(str)
    # All remaining columns become edge attributes keyed on (Child, Parent)
    edge_attr = table.set_index([child, parent])
    edge_attr.index.rename(['Child', 'Parent'], inplace=True)
    if mapping is None:
        # Extract gene-term connections from table
        mask = table.apply(is_mapping, axis=1)
        mapping = table.loc[mask, :].loc[:,[child, parent]]
        hierarchy = table.loc[~mask, :].loc[:,[child, parent]]
        mapping_child, mapping_parent = child, parent
    else:
        # Read separate table of gene-term connections
        try:
            mapping = pd.read_table(mapping, comment='#', header=header)
        except:
            assert isinstance(mapping, pd.DataFrame)
        if mapping_child not in mapping.columns:
            mapping_child = mapping.columns[mapping_child]
        if mapping_parent not in mapping.columns:
            mapping_parent = mapping.columns[mapping_parent]
        for col in [mapping_child, mapping_parent]:
            mapping.loc[:,col] = mapping.loc[:,col].astype(str)
        mapping_attr = mapping.set_index([mapping_child, mapping_parent])
        mapping_attr.index.rename(['Child', 'Parent'], inplace=True)
        try:
            # Used for pandas version >= 0.23
            edge_attr = pd.concat([edge_attr, mapping_attr], sort=True)
        except:
            edge_attr = pd.concat([edge_attr, mapping_attr])
        mapping = mapping.loc[:,[mapping_child, mapping_parent]]
        hierarchy = table.loc[:,[child, parent]]
    # Drop duplicate connections (warn so silent data loss is visible)
    dups = mapping.duplicated([mapping_child, mapping_parent]).sum()
    if dups > 0:
        print('WARNING: Dropping %s duplicate gene-term connections' % dups)
        mapping.drop_duplicates([mapping_child, mapping_parent], inplace=True)
    dups = hierarchy.duplicated([child, parent]).sum()
    if dups > 0:
        print('WARNING: Dropping %s duplicate term-term connections' % dups)
        hierarchy.drop_duplicates([child, parent], inplace=True)
    edge_attr = edge_attr.loc[~ edge_attr.index.duplicated(), :]
    edge_attr.index.names = ['Child', 'Parent']
    if clear_default_attr:
        if cls.EDGETYPE_ATTR in edge_attr:
            del edge_attr[cls.EDGETYPE_ATTR]
    mapping, hierarchy = mapping.values.tolist(), hierarchy.values.tolist()
    return cls(hierarchy,
               mapping,
               parent_child=False,
               edge_attr=edge_attr,
               propagate=propagate,
               verbose=verbose,
               **kwargs)
@classmethod
def from_scipy_linkage(cls, Z):
    """Build an Ontology from a linkage matrix produced by scipy's
    hierarchical/agglomerative clustering.

    The result is a binary tree: scipy's internal cluster ids become
    terms, and the clustered items (leaves) become genes.
    """
    import scipy.cluster.hierarchy
    _, all_nodes = scipy.cluster.hierarchy.to_tree(Z, rd=True)
    leaf_ids = set(scipy.cluster.hierarchy.leaves_list(Z))
    term_edges, gene_edges = [], []
    for node in all_nodes:
        parent_id = node.get_id()
        # Visit left then right child; leaves have neither
        for child in (node.get_left(), node.get_right()):
            if not child:
                continue
            child_id = child.get_id()
            if child_id in leaf_ids:
                gene_edges.append((parent_id, child_id))
            else:
                term_edges.append((parent_id, child_id))
    return cls(term_edges, gene_edges, parent_child=True)
@classmethod
def from_ndex(cls,
              ndex_uuid,
              ndex_user=None,
              ndex_pass=None,
              ndex_server=None,
              edgetype_attr=None,
              edgetype_value=None):
    """Reads an Ontology stored on NDEx. Gene and terms are distinguished
    according by an edge attribute.

    Parameters
    ----------
    ndex_uuid : str
        NDEx UUID of ontology. May also be a full URL containing the
        server and UUID.
    ndex_user : str (optional)
        NDEx username; defaults to ddot.config.ndex_user.
    ndex_pass : str (optional)
        NDEx password; defaults to ddot.config.ndex_pass.
    ndex_server : str (optional)
        NDEx server URL; defaults to ddot.config.ndex_server.
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Fall back to the package-wide NDEx configuration
    if ndex_server is None:
        ndex_server = ddot.config.ndex_server
    if ndex_user is None:
        ndex_user = ddot.config.ndex_user
    if ndex_pass is None:
        ndex_pass = ddot.config.ndex_pass
    # A full URL (server + uuid) may be passed instead of a bare UUID
    if '/' in ndex_uuid:
        ndex_server = parse_ndex_server(ndex_uuid)
        ndex_uuid = parse_ndex_uuid(ndex_uuid)
    G = NdexGraph(
        server=ndex_server,
        username=ndex_user,
        # Bug fix: this argument was a redacted placeholder ("<PASSWORD>"),
        # which is a syntax error; pass the resolved password.
        password=ndex_pass,
        uuid=ndex_uuid)
    return cls.from_NdexGraph(
        G,
        edgetype_attr=edgetype_attr,
        edgetype_value=edgetype_value)
@classmethod
def from_NdexGraph(cls,
                   G,
                   edgetype_attr=None,
                   edgetype_value=None):
    """Converts a NdexGraph object to an Ontology object. Gene and terms
    are distinguished by an edge attribute.

    Parameters
    ----------
    G : NdexGraph
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Translate to NetworkX first, then reuse the NetworkX importer
    nx_graph = NdexGraph_to_nx(G)
    return cls.from_networkx(nx_graph,
                             edgetype_attr=edgetype_attr,
                             edgetype_value=edgetype_value)
@classmethod
def from_networkx(cls,
                  G,
                  edgetype_attr=None,
                  edgetype_value=None,
                  clear_default_attr=True):
    """Converts a NetworkX object to an Ontology object. Gene and terms
    are distinguished by an edge attribute.

    Parameters
    ----------
    G : nx.DiGraph
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs
    clear_default_attr : bool
        If True (default), then remove the node and edge attributes
        ('Label', 'Size', 'NodeType', 'EdgeType', ...) that
        Ontology.to_networkx() / Ontology.to_ndex() add; they are
        redundant once the Ontology object is reconstructed.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if edgetype_attr is None:
        edgetype_attr = cls.EDGETYPE_ATTR
    if edgetype_value is None:
        edgetype_value = cls.GENE_TERM_EDGETYPE
    # Partition edges into gene-term and term-term connections
    gene_term, term_term = [], []
    for child_node, parent_node, attr in G.edges(data=True):
        if attr[edgetype_attr] == edgetype_value:
            gene_term.append((child_node, parent_node))
        else:
            term_term.append((child_node, parent_node))
    edge_meta = nx_edges_to_pandas(G)
    node_meta = nx_nodes_to_pandas(G)
    ont = cls(term_term,
              gene_term,
              node_attr=node_meta,
              edge_attr=edge_meta)
    if clear_default_attr:
        # Drop attributes synthesized by the exporters
        for col in (Ontology.NODETYPE_ATTR, 'Label', 'Size', 'isRoot', 'x_pos', 'y_pos'):
            if col in ont.node_attr.columns:
                del ont.node_attr[col]
        for col in (edgetype_attr, 'Is_Tree_Edge'):
            if col in ont.edge_attr.columns:
                del ont.edge_attr[col]
    return ont
@classmethod
def from_igraph(cls,
                G,
                edgetype_attr=None,
                edgetype_value=None,
                verbose=False):
    """Converts a igraph Graph object to an Ontology object. Gene and terms
    are distinguished by an edge attribute.

    Parameters
    ----------
    G : igraph.Graph
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs
    verbose : bool

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if edgetype_attr is None:
        edgetype_attr = cls.EDGETYPE_ATTR
    if edgetype_value is None:
        edgetype_value = cls.GENE_TERM_EDGETYPE
    # Partition edges into gene-term and term-term connections,
    # resolving igraph vertex ids to their 'name' attribute
    gene_term, term_term = [], []
    for edge in G.es:
        child_name = G.vs[edge.source]['name']
        parent_name = G.vs[edge.target]['name']
        if edge[edgetype_attr] == edgetype_value:
            gene_term.append((child_name, parent_name))
        else:
            term_term.append((child_name, parent_name))
    edge_meta = ig_edges_to_pandas(G)
    node_meta = ig_nodes_to_pandas(G)
    edge_meta.index.names = ['Child', 'Parent']
    node_meta.index.name = 'Node'
    ont = cls(term_term,
              gene_term,
              node_attr=node_meta,
              edge_attr=edge_meta,
              verbose=verbose)
    # Drop attributes synthesized by the exporters
    for col in (Ontology.NODETYPE_ATTR,):
        if col in ont.node_attr.columns:
            del ont.node_attr[col]
    for col in (edgetype_attr, 'Is_Tree_Edge'):
        if col in ont.edge_attr.columns:
            del ont.edge_attr[col]
    return ont
def collapse_ontology(self,
                      method='python',
                      to_keep=None,
                      min_term_size=2,
                      verbose=True):
    """Remove redundant and empty terms. When a term T is removed,
    hierarchical relations are preserved by connecting every child
    of T with every parent of T. This removal operation has the
    nice property of being commutative, i.e. the order of removal
    does not matter.

    Parameters
    -----------
    method : str
        If "mhkramer", then use the collapseRedundantNodes script
        in the alignOntology package (runs an external subprocess).
        If "python", then use an internal Python implementation.
    to_keep : list (optional)
        Terms that must not be collapsed. Only supported for
        method=="python".
    min_term_size : int
        With method=="python", also collapse terms whose gene set is
        smaller than this. Not applied by method=="mhkramer".
    verbose : bool

    Returns
    -------
    : ddot.ddot.Ontology
        A new Ontology object
    """
    if method=='mhkramer':
        assert to_keep is None, 'to_keep is only supported for method=="python"'
        # Propagate forward and then reverse
        ont = self.copy()
        ont = self.propagate(direction='forward', inplace=False)
        ont.propagate(direction='reverse', inplace=True)
        # Locate the bundled alignOntology binary
        top_level = os.path.dirname(os.path.abspath(inspect.getfile(ddot)))
        collapseRedundantNodes = os.path.join(top_level, 'alignOntology', 'collapseRedundantNodes')
        # assert os.path.isdir(ddot.config.alignOntology)
        # collapseRedundantNodes = os.path.join(ddot.config.alignOntology, 'collapseRedundantNodes')
        assert os.path.isfile(collapseRedundantNodes)
        # Write the ontology to a temp file in CLIXO format for the binary
        with tempfile.NamedTemporaryFile('w', delete=False) as f:
            ont.to_table(f, clixo_format=True)
        try:
            cmd = '%s %s' % (collapseRedundantNodes, f.name)
            print('collapse command:', cmd)
            p = Popen(shlex.split(cmd), shell=False, stdout=PIPE, stderr=PIPE)
            collapsed, err = p.communicate()
            collapsed = collapsed.decode()
        finally:
            # Always clean up the temp file, even if the subprocess fails
            os.remove(f.name)
        ont = Ontology.from_table(
            StringIO(collapsed),
            is_mapping=lambda x: x[2]=='gene',
            clixo_format=True
        )
        # Restore the original meta-data on the collapsed ontology
        ont.clear_edge_attr()
        ont.update_node_attr(self.node_attr)
        ont.update_edge_attr(self.edge_attr)
        return ont
    elif method=='python':
        ont = self.propagate('forward', inplace=False)
        # Two terms are redundant when they annotate the same gene set;
        # compare via a hash of the (propagated) gene tuples
        term_hash = {t : hash(tuple(g_list)) for t, g_list in ont.term_2_gene.items()}
        to_collapse = set()
        for p in ont.parent_2_child:
            for c in ont.parent_2_child[p]:
                if term_hash[p] == term_hash[c]:
                    to_collapse.add(p)
        if min_term_size is not None:
            # Also collapse terms below the minimum size
            to_collapse = to_collapse | set([t for t, s in zip(ont.terms, ont.term_sizes) if s < min_term_size])
        if to_keep is not None:
            to_collapse = to_collapse - set(to_keep)
        # print('to_collapse:', sorted(to_collapse))
        ont.propagate('reverse', inplace=True)
        ont_red = ont.delete(to_delete=to_collapse, preserve_transitivity=True)
        return ont_red
@classmethod
def mutual_collapse(cls,
                    ont1,
                    ont2,
                    verbose=False):
    """Collapses two ontologies to the common set of genes.

    Parameters
    -----------
    ont1 : ddot.Ontology.Ontology
    ont2 : ddot.Ontology.Ontology
    verbose : bool

    Returns
    -------
    ont1_collapsed : ddot.Ontology.Ontology
    ont2_collapsed : ddot.Ontology.Ontology
    """
    shared = set(ont1.genes) & set(ont2.genes)
    if verbose:
        print('Common genes:', len(shared))
    # Guard clause: nothing to collapse onto if the gene sets are disjoint
    if not shared:
        raise Exception('No common genes between ontologies')
    # Restrict each ontology to the shared genes, then collapse
    pruned1 = ont1.delete(to_delete=set(ont1.genes) - shared, inplace=False)
    ont1_collapsed = pruned1.collapse_ontology()
    pruned2 = ont2.delete(to_delete=set(ont2.genes) - shared, inplace=False)
    ont2_collapsed = pruned2.collapse_ontology()
    if verbose:
        print('ont1_collapsed:', ont1_collapsed.summary())
        print('ont2_collapsed:', ont2_collapsed.summary())
    return ont1_collapsed, ont2_collapsed
def focus(self,
          branches=None,
          genes=None,
          collapse=False,
          root=True,
          verbose=True):
    """Restrict the ontology to the part connected to the given branches
    and/or genes.

    Deleted children/genes of surviving terms are summarized by
    placeholder nodes named '<term>_<n>_other_genes' /
    '<term>_<n>_other_terms', flagged with a 'Summary' edge attribute.

    Parameters
    ----------
    branches : list (optional)
        Terms of interest; nodes connected to them are kept
        (connectivity per self.connected). At least one of `branches`
        or `genes` must be given.
    genes : list (optional)
        Genes of interest; terms connected to them are kept.
    collapse : bool
        If True, collapse redundant terms afterwards (roots are kept).
    root : bool
        If True, extend the kept set with common ancestors until a
        single root covers it.
    verbose : bool

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    assert (branches is not None) or (genes is not None)
    to_keep = np.array(self.genes + self.terms)
    if branches is not None:
        # Keep nodes connected to at least one branch of interest
        to_keep = to_keep[self.connected(to_keep, branches).sum(1) > 0]
        if verbose:
            print('Genes and Terms to keep:', to_keep.size)
    if genes is not None:
        # Keep nodes connected to at least one gene of interest
        to_keep = to_keep[self.connected(genes, to_keep).sum(0) > 0]
        if verbose:
            print('Genes and Terms to keep:', to_keep.size)
    if root:
        # Grow the kept set with common ancestors until it is rooted
        while True:
            common_root = self.common_ancestors(to_keep, minimal=True)
            if common_root in to_keep or len(common_root)<=1:
                break
            else:
                print('Adding', common_root)
                to_keep = np.append(to_keep, common_root)
    ont = self.delete(to_keep=to_keep, preserve_transitivity=True)
    if collapse:
        ont = ont.collapse_ontology(method='python', to_keep=ont.get_roots())
    df = ont.to_table(edge_attr=True)
    # Summarize what was removed under each surviving term
    new_connections = []
    for t in ont.terms:
        removed_genes = set([self.genes[g] for g in self.term_2_gene[t]]) - set([ont.genes[g] for g in ont.term_2_gene[t]])
        removed_terms = set(self.parent_2_child[t]) - set(ont.parent_2_child[t])
        if len(removed_genes) > 0:
            new_connections.append(('%s_%s_other_genes' % (t, len(removed_genes)), t, self.GENE_TERM_EDGETYPE))
        if len(removed_terms) > 0:
            new_connections.append(('%s_%s_other_terms' % (t, len(removed_terms)), t, self.CHILD_PARENT_EDGETYPE))
    if len(new_connections) > 0:
        new_connections = pd.DataFrame(new_connections)
        new_connections.columns = ['Child', 'Parent', self.EDGETYPE_ATTR]
        new_nodes = new_connections['Child'].values.tolist()
        # Mark summary rows so they can be distinguished from real edges
        new_connections['Summary'] = True
        df['Summary'] = False
        try:
            # Used for pandas version >= 0.23
            tmp = pd.concat([df, new_connections], ignore_index=True, sort=True)
        except:
            tmp = pd.concat([df, new_connections], ignore_index=True)
        df = tmp[df.columns]
    # Rebuild the ontology with the summary nodes included
    ont = Ontology.from_table(df)
    ont.update_node_attr(self.node_attr)
    # orig_sizes = pd.DataFrame({'Original_Size' : self.term_sizes}, index=self.terms)
    # ont.update_node_attr(orig_sizes)
    # if len(new_connections)>0:
    #     summary_sizes = pd.DataFrame({'Original_Size' : [int(x.split('_')[1]) for x in new_nodes]}, index=new_nodes)
    #     ont.update_node_attr(summary_sizes)
    if len(new_connections) > 0:
        # Label summary nodes as e.g. '3_other_genes'
        ont.update_node_attr(pd.DataFrame({'Label':['_'.join(x.split('_')[1:]) for x in new_nodes]}, index=new_nodes))
    return ont
def delete(self,
           to_delete=None,
           to_keep=None,
           preserve_transitivity=True,
           inplace=False):
    """Delete genes and/or terms from the ontology.

    Parameters
    ----------
    to_delete : array-like (optional)
        Names of genes and/or terms to delete. Either to_delete or
        to_keep must be specified.
    to_keep : array-like (optional)
        Names of genes and/or terms to keep; all other genes/terms
        are delete. Only used if to_delete is not specified.
    preserve_transitivity : bool
        If True, then maintain transitive relations when deleting
        terms. For example, if the hierarchical structure consists
        of

            geneA --> term1
            term1 --> term2
            term2 --> term3
            term2 --> term4

        then deleting term2 will result in the structure:

            geneA --> term1
            term1 --> term3
            term3 --> term4

        If False, then deleting term2 will result in a
        disconnected structure:

            geneA --> term1
    inplace : bool
        If True, then modify the ontology. If False, then create and modify a copy.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if inplace:
        ont = self
    else:
        ont = self.copy()
    # Split the requested nodes into terms and genes; unknown names are ignored
    if to_delete is not None:
        terms = set([x for x in to_delete if x in ont.terms_index])
        genes = set([x for x in to_delete if x in ont.genes_index])
    elif to_keep is not None:
        terms = set(ont.terms) - set([x for x in to_keep if x in ont.terms_index])
        genes = set(ont.genes) - set([x for x in to_keep if x in ont.genes_index])
    else:
        raise Exception('Must specify nodes to delete or to keep')
    if len(genes) > 0:
        # Removing genes only requires filtering the gene lists/mappings
        ont.genes = [g for g in ont.genes if g not in genes]
        ont.genes_index = make_index(ont.genes)
        ont.gene_2_term = {g : t for g, t in ont.gene_2_term.items()
                           if g not in genes}
        ont._update_fields()
    if len(terms) > 0:
        if preserve_transitivity:
            # Work on mutable set-valued copies of the connectivity maps,
            # keyed by names (not indices) so terms can be spliced out
            gene_2_term_set = {g : set([ont.terms[s] for s in t]) for g, t in ont.gene_2_term.items()}
            term_2_gene_set = {a : set(b) for a, b in ont.term_2_gene.items()}
            child_2_parent_set = {a : set(b) for a, b in ont.child_2_parent.items()}
            parent_2_child_set = {a : set(b) for a, b in ont.parent_2_child.items()}
            for t in terms:
                t_parents = child_2_parent_set[t]
                t_genes = term_2_gene_set[t]
                t_children = parent_2_child_set[t]
                # Re-wire t's genes to every parent of t
                for g_i in t_genes:
                    g = ont.genes[g_i]
                    gene_2_term_set[g].update(t_parents)
                    gene_2_term_set[g].remove(t)
                # Re-wire every parent of t to t's genes and children
                for p in t_parents:
                    term_2_gene_set[p].update(t_genes)
                    parent_2_child_set[p].update(t_children)
                    parent_2_child_set[p].remove(t)
                # Re-wire every child of t to t's parents
                for c in t_children:
                    child_2_parent_set[c].update(t_parents)
                    child_2_parent_set[c].remove(t)
                del child_2_parent_set[t]
                del parent_2_child_set[t]
                del term_2_gene_set[t]
            ont.terms = [t for t in ont.terms if t not in terms]
            terms_index = make_index(ont.terms)
            ont.terms_index = terms_index
            # Convert term names back to indices in the new term ordering
            ont.gene_2_term = {g : sorted([terms_index[s] for s in t]) for g, t in gene_2_term_set.items()}
            ont.child_2_parent = {c : sorted(p) for c, p in child_2_parent_set.items()}
            ont.parent_2_child = invert_dict(ont.child_2_parent)
            ont._update_fields()
        else:
            # Simple removal: drop the terms and any edges touching them
            tmp_gene_2_term = {g : [ont.terms[t] for t in t_list]
                               for g, t_list in ont.gene_2_term.items()}
            ont.terms = [t for t in ont.terms if t not in terms]
            ont.terms_index = make_index(ont.terms)
            ont.gene_2_term = {g : [ont.terms_index[t] for t in t_list if t not in terms]
                               for g, t_list in tmp_gene_2_term.items()}
            ont.parent_2_child = {p : [c for c in c_list if c not in terms]
                                  for p, c_list in ont.parent_2_child.items()
                                  if p not in terms}
            ont._update_fields()
    # Update node/edge attributes
    to_keep = (set(ont.terms) | set(ont.genes)) - genes - terms
    ont.edge_attr = ont.edge_attr[ont.edge_attr.index.get_level_values(0).isin(to_keep) | \
                                  ont.edge_attr.index.get_level_values(1).isin(to_keep)]
    ont.node_attr = ont.node_attr[ont.node_attr.index.isin(to_keep)]
    return ont
def rename(self,
           genes=lambda x: x,
           terms=lambda x: x,
           inplace=False):
    """Rename gene and/or term names.

    Parameters
    ----------
    genes : dict or function
        If dictionary, then it maps current gene names to new
        names. A gene may map to an iterable of names to create
        several renamed copies. Genes not in dictionary keep their name.
        If function, then genes(name) returns the new name.
    terms : dict or function
        If dictionary, then it maps current term names to new
        names. Terms not in dictionary keep their name. Terms that map
        to the same new name are merged.
        If function, then terms(name) returns the new name.
    inplace : bool
        If True, then modify the ontology. If False, then create
        and modify a copy.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Normalize callables into explicit old-name -> new-name dicts.
    # (callable() instead of the previous bare try/except, so genuine
    # errors raised by a user-supplied function are not swallowed.)
    if callable(terms):
        terms = {t : terms(t) for t in self.terms}
    if callable(genes):
        genes = {g : genes(g) for g in self.genes}
    if inplace:
        ont = self
    else:
        ont = self.copy()
    if genes:
        new_genes = set()
        new_gene_2_term = {}
        for g in ont.genes:
            new_g = genes.get(g, g)
            # A gene may be renamed to several new names (one-to-many)
            if hasattr(new_g, '__iter__') and not isinstance(new_g, str):
                for new_gg in new_g:
                    new_genes.add(new_gg)
                    new_gene_2_term[new_gg] = ont.gene_2_term[g]
            else:
                new_genes.add(new_g)
                new_gene_2_term[new_g] = ont.gene_2_term[g]
        ont.genes = sorted(new_genes)
        ont.gene_2_term = new_gene_2_term
        ont.genes_index = make_index(ont.genes)
        ont._update_fields()
    if terms:
        ont.parent_2_child = {terms.get(p, p) : [terms.get(c, c) for c in c_list]
                              for p, c_list in ont.parent_2_child.items()}
        old_term_names = ont.terms
        ont.terms = [terms.get(t,t) for t in ont.terms]
        # Retain a unique set of term names
        ont.terms = sorted(set(ont.terms))
        ont.terms_index = make_index(ont.terms)
        ont.gene_2_term = {g : [ont.terms_index[terms.get(t,t)] for t in [old_term_names[t] for t in t_list]] for g, t_list in ont.gene_2_term.items()}
        ont._update_fields()
    conversions = genes.copy()
    conversions.update(terms)
    # Remove identities
    conversions = {k : v for k, v in conversions.items() if k!=v}
    f = lambda x: conversions.get(x,x)
    # Update node attributes
    index = ont.node_attr.index
    ont.node_attr.index = pd.Series(index).map(f)
    # Update edge attributes.
    # Bug fix: MultiIndex.set_levels(..., inplace=True) was deprecated in
    # pandas 1.x and removed in 2.0; assign the returned index instead
    # (equivalent behavior on all pandas versions).
    idx = ont.edge_attr.index
    ont.edge_attr.index = idx.set_levels([idx.levels[0].map(f), idx.levels[1].map(f)])
    ont._check_valid()
    return ont
def _check_valid(self):
if not self.is_dag():
print('Found cycle:', nx.find_cycle(self._to_networkx_no_layout()))
raise Exception('Not a directed acyclic graph')
assert len(self.genes) == len(set(self.genes))
assert len(self.terms) == len(set(self.terms))
assert set(self.genes) == set(self.gene_2_term.keys())
assert set(self.terms) == set(self.child_2_parent.keys())
assert set(self.terms) == set(self.parent_2_child.keys())
assert set(self.terms) == set(self.term_2_gene.keys())
assert self.edge_attr.index.duplicated().sum()==0
assert self.node_attr.index.duplicated().sum()==0
def to_table(self,
             output=None,
             term_2_term=True,
             gene_2_term=True,
             edge_attr=False,
             header=True,
             parent_child=True,
             clixo_format=False):
    """Convert Ontology to a table representation. Return a
    pandas.DataFrame and, optionally, write it to a file as a
    tab-delimited file.

    Parameters
    ----------
    output : filepath or file-like
        File to write table. If None, then only return a
        pandas.DataFrame
    term_2_term : bool
        Include (child term, parent term) pairs
    gene_2_term : bool
        Include (gene, term) pairs
    edge_attr : array-like or bool
        If truthy, merge in all columns of ``self.edge_attr``.
        If False, then don't include any attribute.
    header : bool
        If True (default), then write the column names as the
        first row of the table.
    parent_child : bool
        If True, then the first column is the parent term and the
        second column is the child term or gene. If False, then
        the columns are reversed.
    clixo_format : bool
        If True, the table is the same format used by the CLIXO C++
        implementation. In particular, the table has three columns:

        Column 1) Parent Term
        Column 2) Child Term or Gene
        Column 3) The string "gene" if the row is a
        gene-term mapping, otherwise the string "default".

    Returns
    -------
    : pandas.DataFrame
        Contains at least three columns: (1) "Parent", (2)
        "Child", and (3) "EdgeType".
    """
    if clixo_format:
        # Build the standard 3-column table, then translate edge types
        # to CLIXO's vocabulary
        df = self.to_table(output=None,
                           term_2_term=True,
                           gene_2_term=True,
                           edge_attr=False,
                           header=False,
                           parent_child=True,
                           clixo_format=False)
        df.replace({self.EDGETYPE_ATTR : {self.GENE_TERM_EDGETYPE : 'gene', self.CHILD_PARENT_EDGETYPE : 'default'}}, inplace=True)
        if output is not None:
            df.to_csv(output, header=False, index=False, sep='\t')
        return df

    # Assemble the requested edge sets. pd.concat is used instead of
    # DataFrame.append, which was deprecated and removed in pandas 2.0.
    parts = [pd.DataFrame(columns=['Parent', 'Child', self.EDGETYPE_ATTR])]
    if term_2_term:
        parts.append(self._hierarchy_to_pandas())
    if gene_2_term:
        parts.append(self._mapping_to_pandas())
    df = pd.concat(parts, ignore_index=True)

    if edge_attr and self.edge_attr.shape[1] > 0:
        # edge_attr is indexed by (Child, Parent) pairs
        df = df.merge(self.edge_attr,
                      how='left',
                      left_on=['Child', 'Parent'],
                      right_index=True)

    # Put the two node columns first, in the requested order
    first_two = ['Parent', 'Child'] if parent_child else ['Child', 'Parent']
    df = df[first_two + [x for x in df.columns if x not in first_two]]

    if output is not None:
        df.to_csv(output, header=header, index=False, sep='\t')
    return df
def _hierarchy_to_pandas(self):
triples = [(p,c) for p, c_list in self.parent_2_child.items() for c in c_list]
df = pd.DataFrame(triples, columns=['Parent', 'Child'])
df[self.EDGETYPE_ATTR] = self.CHILD_PARENT_EDGETYPE
return df
def _mapping_to_pandas(self):
pairs = [(self.terms[t], g) for g, t_list in self.gene_2_term.items() for t in t_list]
df = pd.DataFrame(pairs, columns=['Parent', 'Child'])
df[self.EDGETYPE_ATTR] = self.GENE_TERM_EDGETYPE
return df
def copy(self):
    """Create a deep copy of the Ontology object."""
    ont = Ontology(None, None, **{'empty' : True})
    # Attribute tables: pandas deep copies
    ont.node_attr = self.node_attr.copy()
    ont.edge_attr = self.edge_attr.copy()
    # Name lists and their lookup indices
    ont.genes = self.genes[:]
    ont.terms = self.terms[:]
    ont.genes_index = self.genes_index.copy()
    ont.terms_index = self.terms_index.copy()
    # Cached term sizes may not have been computed yet
    ont._term_sizes = None if self._term_sizes is None else self._term_sizes[:]
    # Mappings: copy each value list so mutations don't leak between copies
    for attr in ('gene_2_term', 'term_2_gene', 'child_2_parent', 'parent_2_child'):
        setattr(ont, attr, {k : v[:] for k, v in getattr(self, attr).items()})
    return ont
def flatten(self,
            include_genes=True,
            include_terms=False,
            similarity='Resnik'):
    """Flatten the hierarchy into a node-node similarity matrix by
    calculating a similarity between pairs of genes. Currently, only
    the Resnik semantic similarity measure is implemented.

    Parameters
    -----------
    include_genes : bool
        If True, then calculate pairwise similarities between
        genes. Must currently be True.
    include_terms : bool
        Not yet implemented; must be False.
    similarity : str
        Type of semantic similarity. (default: "Resnik")

        The Resnik similarity s(g1,g2) is defined as
        :math:`-log_2(|T_{sca}| / |T_{root}|)` where :math:`|T|` is
        the number of genes under term T. :math:`T_{sca}` is the
        "smallest common ancestor", the common ancestral term with the
        smallest term size. :math:`T_{root}` is the root term.

        Resnik, P. (1999). Semantic similarity in a taxonomy: An
        information-based measure and its application to problems
        of ambiguity in natural language. J. Artif. Intell. Res. 11, 95-130.

    Returns
    -------
    : (sim, nodes)
        A 2-tuple consisting of `sim`, a node-by-node NumPy array,
        and `nodes`, a NumPy array of the node names in `sim`.
    """
    assert include_genes
    assert not include_terms, 'include_terms is not yet implemented'
    if similarity=='Resnik':
        sca, nodes = self.get_best_ancestors(include_genes=include_genes)
        # Bug fix: build a fresh list. The previous code aliased
        # self.genes and then extended it in place with +=, which would
        # have corrupted the Ontology if include_terms were ever enabled.
        nodes_subset = list(self.genes) if include_genes else []
        if include_terms:
            nodes_subset += self.terms
        nodes_idx = ddot.utils.make_index(nodes)
        idx = [nodes_idx[v] for v in nodes_subset]
        # Restrict the smallest-common-ancestor matrix to the requested nodes
        sca = sca[idx, :][:, idx]
        # Resnik: -log2( |T_sca| / |genes| )
        ss = -1 * np.log2(np.array(self.term_sizes)[sca] / float(len(self.genes)))
        ss = ss.astype(np.float32)
        return ss, np.array(nodes_subset)
    else:
        raise Exception('Unsupported similarity type')
def common_ancestors(self, nodes, min_nodes='all', minimal=True):
    """Return the common ancestral terms of a set of nodes.

    Parameters
    ----------
    nodes : list
        Nodes (genes and/or terms) whose common ancestors are sought.
    min_nodes : str or int
        If 'all', return only terms connected to all of the input
        nodes. If an integer, return terms connected to at least that
        many of the input nodes.
    minimal : bool
        If True, drop terms that are themselves ancestors of other
        common ancestors, leaving only the 'minimal' set.

    Returns
    -------
    : list
        Common ancestors (numpy array of term names)
    """
    threshold = len(nodes) if min_nodes == 'all' else min_nodes
    # Count, per term, how many of the input nodes reach it
    conn = self.connected(nodes, self.terms)
    is_common = conn.sum(0) >= threshold
    anc = np.array(self.terms)[is_common]
    if minimal:
        # Remove terms that are ancestors of other common ancestors
        anc_conn = self.connected(anc, anc, sparse=False)
        np.fill_diagonal(anc_conn, 0)
        anc = anc[anc_conn.sum(0) == 0]
    return anc
def _get_term_2_gene(self, verbose=False):
    """Invert gene_2_term into a {term name : [gene names]} mapping.

    Terms with no direct gene annotations get an empty list.
    """
    if verbose:
        print('Calculating term_2_gene')
    term_2_gene = invert_dict(
        self.gene_2_term,
        keymap=make_index(self.genes),
        valmap=dict(enumerate(self.terms)))
    # Ensure every term is present, even if unannotated
    for term in self.terms:
        term_2_gene.setdefault(term, [])
    return term_2_gene
@property
def term_sizes(self):
    """Gene count for each term (with propagation), aligned with self.terms.

    Computed lazily on first access and memoized in ``_term_sizes``.
    """
    sizes = self._term_sizes
    if sizes is None:
        sizes = self._get_term_sizes(propagate=True)
        self._term_sizes = sizes
    return sizes
def _get_term_sizes(self, propagate=True):
"""Returns an array of term sizes in the same order as self.terms"""
if propagate:
ont = self.propagate(inplace=False)
gene_2_term = ont.gene_2_term
# gene_2_term = self._propagate_forward()
else:
gene_2_term = self.gene_2_term
tmp = Counter([x for y in gene_2_term.values() for x in y])
term_sizes = [tmp[x] for x in range(len(self.terms))]
return term_sizes
def get_information_gain(self):
    # NOTE(review): unfinished stub. `terms` is undefined in this scope
    # (calling this raises NameError), and `parent_2_children` is not an
    # attribute of this class (the mapping is named `parent_2_child`).
    # The loop body also computes nothing and the method returns None.
    # Left unchanged pending the intended semantics — TODO confirm with author.
    for p in terms:
        self.parent_2_children[p]
def shuffle_genes(self, inplace=False):
    """Randomly permute the names of the genes.

    Parameters
    ----------
    inplace : bool
        If True, modify this Ontology; if False (default), return a
        modified copy. (Bug fix: previously this flag was ignored and
        a copy was always returned.)

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    shuffled = self.genes[:]
    random.shuffle(shuffled)
    rename = dict(zip(self.genes, shuffled))
    # Forward the inplace flag instead of hard-coding False
    return self.rename(rename, inplace=inplace)
def get_tree(self, ret='edges', verbose=False):
    """Identify a spanning tree of the DAG (including genes as part of the
    DAG).

    Parameters
    ------------
    ret : str
        If 'edges', then return a set of (u, v) edges in the
        tree. If 'ontology', return an Ontology object consisting
        of only the tree edges.

    Returns
    -------
    : set or Ontology
    """
    tree = self.to_igraph(include_genes=True, spanning_tree=True)
    if ret == 'edges':
        # Collect the name pairs of edges marked as spanning-tree edges
        return set((tree.vs[e.source]['name'], tree.vs[e.target]['name'])
                   for e in tree.es
                   if e['Is_Tree_Edge'] == 'Tree')
    elif ret == 'ontology':
        # Drop non-tree edges, then rebuild an Ontology from what remains
        non_tree = [e.index for e in tree.es if e['Is_Tree_Edge'] == 'Not_Tree']
        tree.delete_edges(non_tree)
        return Ontology.from_igraph(tree, verbose=verbose)
def is_dag(self):
    """Return True if the Ontology (genes included) is a valid directed
    acyclic graph, False otherwise.
    """
    graph = self.to_igraph(include_genes=True, spanning_tree=False)
    return graph.is_dag()
def topological_sorting(self, top_down=True, include_genes=False):
    """Return node names in topological order.

    Parameters
    ----------
    top_down : bool
        If True, ancestral nodes (e.g. the root) come before their
        descendants; if False, the order is reversed.
    include_genes : bool
        If True, genes are included in the ordering.
    """
    graph = self.to_igraph(include_genes=include_genes, spanning_tree=False)
    order = list(graph.vs[graph.topological_sorting(mode='out')]['name'])
    return order if top_down else order[::-1]
def to_igraph(self, include_genes=True, spanning_tree=False):
    """Convert Ontology to an igraph.Graph object. Gene and term names are
    stored in the 'name' vertex attribute of the igraph object.

    Parameters
    ----------
    include_genes : bool
        Include genes as vertices in the igraph object.
    spanning_tree : bool
        If True, then identify a spanning tree of the DAG and attach
        an edge attribute "Is_Tree_Edge" with value 'Tree' or 'Not_Tree'.

    Returns
    -------
    : igraph.Graph
    """
    if include_genes:
        # Vertex indexing: genes occupy [0, len(genes)); terms are
        # offset to come after the genes.
        terms_index_offset = {t : v + len(self.genes) for t, v in self.terms_index.items()}
        # (gene, term) annotation edges, in vertex-index space
        gene_term_edges = [(self.genes_index[g], terms_index_offset[self.terms[t]])
                           for g in self.genes
                           for t in self.gene_2_term[g]]
        # (child term, parent term) hierarchy edges
        child_parent_edges = [(terms_index_offset[c], terms_index_offset[p])
                              for p, children in self.parent_2_child.items()
                              for c in children]
        # Node attributes ordered to match the vertex indices (genes first)
        vertex_attrs = self.node_attr.reindex(index=self.genes + self.terms).loc[self.genes + self.terms].to_dict(orient='list')
        vertex_attrs.update({
            'name':self.genes + self.terms,
            self.NODETYPE_ATTR:[self.GENE_NODETYPE for x in self.genes] + [self.TERM_NODETYPE for x in self.terms]
        })
        graph = igraph.Graph(n=len(self.genes) + len(self.terms),
                             edges=gene_term_edges + child_parent_edges,
                             directed=True,
                             vertex_attrs=vertex_attrs,
                             edge_attrs={self.EDGETYPE_ATTR : [self.GENE_TERM_EDGETYPE for x in gene_term_edges] + \
                                         [self.CHILD_PARENT_EDGETYPE for x in child_parent_edges]})
    else:
        # Terms only: vertex index equals position in self.terms
        edges = [(self.terms_index[c], self.terms_index[p]) for p, children in self.parent_2_child.items() for c in children]
        graph = igraph.Graph(n=len(self.terms),
                             edges=edges,
                             directed=True,
                             vertex_attrs={'name':self.terms},
                             edge_attrs={self.EDGETYPE_ATTR : [self.CHILD_PARENT_EDGETYPE for x in edges]})
    if spanning_tree:
        # Parent priority: term size for terms; genes (not in terms_index)
        # default to 1. Smaller priority is preferred (optim=min).
        parent_priority = [self.term_sizes[self.terms_index[v['name']]] if (v['name'] in self.terms_index) else 1 for v in graph.vs]
        # Identify spanning tree
        graph = self._make_tree_igraph(
            graph,
            parent_priority=parent_priority,
            optim=min,
            edge_name='Is_Tree_Edge')
        # Convert the boolean marker into the string labels used downstream
        graph.es['Is_Tree_Edge'] = ['Tree' if x else 'Not_Tree' for x in graph.es['Is_Tree_Edge']]
    return graph
def shortest_paths(self,
                   descendants=None,
                   ancestors=None,
                   sparse=False,
                   weights=None,
                   chunk_size=500):
    """Calculate the length of the shortest paths from descendant nodes to
    ancestor nodes.

    Parameters
    ----------
    descendants : list, optional
        Source vertices. Default: all vertices (genes and terms).
    ancestors : list, optional
        Target vertices. Default: same as ``descendants``.
    sparse : bool
        If True, return a scipy.sparse matrix. If False, return a
        NumPy array
    weights : dict or number
        Dictionary mapping (child term, parent term) or (gene,
        term) edges to weights. Any edge with no given weight is
        assigned a weight of 0 by default.

        (default) If weights is None, then a uniform weight of 1 is
        used.
    chunk_size : int (optional)
        Computational optimization: shortest paths are calculated in
        batches of source vertices.

    Returns
    -------
    d : np.ndarray or scipy.sparse.spmatrix
        d[x,y] is the length of the shortest directed path from a
        descendant node x to ancestor node y. d[x,y]==numpy.inf if
        no directed path exists. The rows are in the same order as
        <descendants>, and the columns are in the same order as
        <ancestors>.
    """
    graph = self.to_igraph(include_genes=True, spanning_tree=False)
    import numbers
    if weights is None:
        # Uniform unit weight; the dict branch below is then skipped
        # because 1 is a numbers.Number.
        weights = 1
    if weights is not None and not isinstance(weights, numbers.Number):
        # Assume dictionary keyed by (source name, target name); missing
        # edges default to weight 0
        weights = [weights.get((graph.vs[e.source]['name'],
                                graph.vs[e.target]['name']), 0) for e in graph.es]
    graph.es['weight'] = weights
    if descendants is None:
        descendants = graph.vs
    if ancestors is None:
        ancestors = descendants
    # Compute in chunks of source vertices to bound memory use
    tmp = [graph.shortest_paths(
        descendants[x[0]:x[1]],
        ancestors,
        weights='weight',
        mode='out')
        for x in split_indices_chunk(len(descendants), chunk_size)]
    if sparse:
        return scipy.sparse.vstack([scipy.sparse.csr_matrix(x) for x in tmp])
    else:
        return np.vstack(tmp)
def longest_paths(self,
                  descendants=None,
                  ancestors=None,
                  sparse=False,
                  weights=None,
                  chunk_size=500):
    # TODO: when ancestors are specified, the results become negative
    """Compute the lengths of the longest directed paths between all pairs
    of terms.

    Implemented by running shortest_paths with a uniform weight of -1
    and negating the result. NOTE(review): the ``weights`` parameter is
    currently accepted but ignored.

    Returns
    -------
    d : np.ndarray or scipy.sparse.spmatrix
        d[x,y] is the length of the longest directed path from a
        descendant term with index x to an ancestral term with
        index y, where indices are defined by
        self.terms. d[x,y]==numpy.inf if no directed path exists.
    """
    d = self.shortest_paths(descendants=descendants,
                            ancestors=ancestors,
                            sparse=sparse,
                            weights=-1,
                            chunk_size=chunk_size)
    # Negate to turn minimized negative path lengths into maxima
    if sparse:
        d.data = -d.data
    else:
        d = -d
    return d
def connected(self,
              descendants=None,
              ancestors=None,
              sparse=False):
    """Calculate which genes or terms are descendants of other genes or
    terms.

    Parameters
    -----------
    descendants: list
        A list of genes and/or terms. Default: all genes followed by
        all terms, ordered as `self.genes` and `self.terms`.
    ancestors: list
        A list of genes and/or terms. Default: same as ``descendants``.
    sparse : bool
        If True, return a scipy.sparse matrix. If False (default),
        return a NumPy array.

    Returns
    -------
    d : np.ndarray or scipy.sparse.matrix
        A descendants-by-ancestors matrix. ``d[i,j]`` is 1 if node
        i is a descendant of node j, and 0 otherwise. Note that
        ``d[i,i]==1`` and ``d[root,i]==0``, for every i.
    """
    dist = self.shortest_paths(descendants=descendants,
                               ancestors=ancestors,
                               sparse=sparse)
    # A finite path length means the ancestor is reachable
    if sparse:
        dist.data = np.isfinite(dist.data)
        return dist
    return np.isfinite(dist)
# def get_leaves(self, terms_list, children_list=None):
# """Returns terms in ``terms_list`` that are not ancestors of any term in
# ``children_list``.
# Parameters
# ----------
# terms_list : list
# children_list : list
# If ``children_list`` is None, then select the terms in
# <terms_list> that are not ancestors of any of the other
# terms in <terms_list>.
# """
# connectivity_matrix_nodiag = self.get_connectivity_matrix_nodiag()
# terms_list = np.array(terms_list)
# if children_list is None:
# children_list = terms_list
# else:
# children_list = np.array(children_list)
# return terms_list[~ np.any(connectivity_matrix_nodiag[children_list, :][:, terms_list], axis=0)]
def propagate(self,
              direction='forward',
              gene_term=True,
              term_term=False,
              verbose=False,
              inplace=False):
    """Propagate gene-term annotations through the ontology.

    As an example, consider an ontology with one gene ``g``, three terms
    ``t1, t2, t3`` and the following connections:

    ::

        t1-->t2
        t2-->t3
        g-->t1
        g-->t2

    In "forward" propagation, a new relation ``g-->t3`` is added. In
    "reverse" propagation, the relation "g-->t2" is deleted
    because it is an indirect relation inferred from "g-->t1" and
    "t1-->t2".

    Parameters
    ----------
    direction : str
        The direction of propagation. Either 'forward' or 'reverse'
    gene_term : bool
        If True, propagate gene-term annotations.
    term_term : bool
        If True, propagate term-term (child-parent) relations.
    inplace : bool
        If True, then modify the ontology. If False, then create
        and modify a copy.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if inplace:
        ont = self
    else:
        ont = self.copy()
    # Bug fix: the error message previously said "forward or backward",
    # but the accepted value is 'reverse'.
    assert direction in ['forward', 'reverse'], "Propagation direction must be forward or reverse"
    forward = direction=='forward'
    if not forward:
        # This is needed to ensure that the pruning to a parent's
        # gene set can be based on the gene sets of its direct
        # children
        ont = ont.propagate(gene_term=gene_term, term_term=term_term, direction='forward', inplace=True)
    # Work on mutable set copies of the mappings
    if gene_term:
        term_2_gene_set = {t : set(g) for t, g in ont.term_2_gene.items()}
    if term_term:
        parent_2_child_set = {p : set(c) for p, c in ont.parent_2_child.items()}
    # # TODO: have this topological sorting be a part of the code below
    # graph = ont.to_igraph(include_genes=False, spanning_tree=False)
    # for c_idx in graph.topological_sorting(mode='in'):
    #     child = graph.vs[c_idx]['name']
    # Visit terms so that by the time a parent is updated from a child,
    # the child's own set is already final (leaves-first for forward,
    # root-first for reverse).
    for child in ont.topological_sorting(top_down=forward, include_genes=False):
        for parent in ont.child_2_parent[child]:
            if gene_term:
                if forward:
                    # Parent inherits all of the child's genes
                    term_2_gene_set[parent] |= term_2_gene_set[child]
                else:
                    # Parent drops genes implied by the child
                    term_2_gene_set[parent] -= term_2_gene_set[child]
            if term_term:
                if forward:
                    parent_2_child_set[parent] |= parent_2_child_set[child]
                else:
                    parent_2_child_set[parent] -= parent_2_child_set[child]
    # Write the propagated sets back into the Ontology's list-based fields
    if gene_term:
        ont.gene_2_term = invert_dict(term_2_gene_set,
                                      keymap=make_index(ont.terms),
                                      valmap=dict(enumerate(ont.genes)))
        ont.term_2_gene = {a : list(b) for a, b in term_2_gene_set.items()}
    if term_term:
        ont.parent_2_child = {a : list(b) for a, b in parent_2_child_set.items()}
        ont.child_2_parent = ont._get_child_2_parent()
    ont._check_valid()
    return ont
def get_ontotype(self,
                 genotypes,
                 input_format='gene_list',
                 output_format='dataframe',
                 matrix_columns=None):
    """Transform genotypes to ontotypes.

    .. [1] Yu, M.K. et al., 2016. "Translation of genotype to
       phenotype by a hierarchy of cell subsystems". *Cell
       Systems*, 2(2), pp.77-88.

    Parameters
    ----------
    genotypes : list, np.ndarray, scipy.sparse.spmatrix, pd.DataFrame

    input_format : str
        If "gene_list", then ``genotypes`` is a list of genotypes,
        where genotype is itself a list of genes mutated. Each
        gene is assumed to have a mutation value of 1.

        If 'matrix', then ``genotypes`` is a genotype-by-gene
        matrix, where the value at position (i,j) represents the
        mutation value of gene j in genotype i. ``genotypes`` can
        be a NumPy array, SciPy sparse matrix, or Pandas
        dataframe.
    output_format : str
        If 'sparse', then return a sparse matrix as a
        scipy.sparse.csr_matrix object.

        If 'dataframe' (default), then return a pandas.DataFrame object.

        If 'array', then return a numpy.ndarray object.
    matrix_columns : list
        represents a list of the genes that are represented by the
        columns of ``genotypes``. Only used when input_format is
        "matrix" and ``genotypes`` is a NumPy array or SciPy sparse
        matrix.

    Returns
    -------
    : scipy.sparse.csr_matrix, pandas.DataFrame, numpy.ndarray
        genotype-by-term matrix, where the ordering of rows and
        terms is the same as ``genotypes`` and ``self.terms``
    """
    genotypes_names = None
    if input_format=='gene_list':
        # Build the CSR matrix directly: for each genotype, concatenate
        # the term indices hit by its mutated genes.
        gene_2_term = {k: np.array(v) for k, v in self.gene_2_term.items()}
        genotypes_x = [np.concatenate([gene_2_term[g] for g in gset]) if len(gset)>0 else np.array([]) for gset in genotypes]
        # CSR components: column indices plus row start/stop offsets
        indices = np.concatenate(genotypes_x)
        indptr = np.append(0, np.cumsum([gset.size for gset in genotypes_x]))
        data = np.ones((indices.size, ), dtype=np.int64)
        ontotypes = scipy.sparse.csr_matrix(
            (data, indices, indptr),
            (len(genotypes), len(self.terms)))
        # Merge repeated (genotype, term) hits into summed counts
        ontotypes.sum_duplicates()
    elif input_format=='matrix':
        if isinstance(genotypes, pd.DataFrame):
            matrix_columns = genotypes.columns
            genotypes_names = genotypes.index
            genotypes = genotypes.values
        elif isinstance(genotypes, np.ndarray) or scipy.sparse.issparse(genotypes):
            assert matrix_columns is not None
        else:
            raise Exception("Parameter <genotypes> must be a genotype-by-gene matrix "
                            "represented as a Pandas dataframe, NumPy array, or SciPy sparse matrix. "
                            "Consider changing the <input_format> parameter")
        # Keep only columns for genes known to this Ontology
        contained = np.array([g in self.genes_index for g in matrix_columns])
        genotypes = scipy.sparse.csc_matrix(genotypes)[:,contained]
        gene_2_term_matrix = scipy.sparse.csr_matrix(self.get_gene_2_term_matrix())
        gene_2_term_matrix = scipy.sparse.csr_matrix(gene_2_term_matrix)[contained,:]
        # Ontotype = genotype-by-gene matrix times gene-by-term matrix
        ontotypes = genotypes.dot(gene_2_term_matrix)
    else:
        raise Exception('Invalid input format')
    if output_format=='dataframe':
        ontotypes = pd.DataFrame(ontotypes.toarray(), columns=self.terms)
        if genotypes_names is not None:
            ontotypes.index = genotypes_names
    elif output_format=='sparse':
        pass
    elif output_format=='array':
        ontotypes = ontotypes.toarray()
    else:
        raise Exception('Invalid output format')
    return ontotypes
def get_gene_2_term_matrix(self):
    """Return the gene-by-term annotation matrix.

    Entry (i, j) is 1 when gene i (ordered as self.genes) is directly
    annotated to term j (ordered as self.terms).

    Returns
    -------
    : scipy.sparse.coo_matrix
    """
    rows, cols = [], []
    for gene, t_list in self.gene_2_term.items():
        gene_idx = self.genes_index[gene]
        for t in t_list:
            rows.append(gene_idx)
            cols.append(t)
    return scipy.sparse.coo_matrix(
        ([1] * len(rows), (rows, cols)),
        shape=(len(self.genes), len(self.terms)))
def summary(self):
    """Summarize the Ontology's contents: gene/term counts, relation
    counts, and the node/edge attribute column names.

    Returns
    --------
    : str
    """
    node_attr_names = [] if self.node_attr is None else self.node_attr.columns.tolist()
    edge_attr_names = [] if self.edge_attr is None else self.edge_attr.columns.tolist()
    n_gene_term = sum(len(v) for v in self.gene_2_term.values())
    n_term_term = sum(len(v) for v in self.parent_2_child.values())
    return ('%s genes, '
            '%s terms, '
            '%s gene-term relations, '
            '%s term-term relations'
            '\nnode_attributes: %s'
            '\nedge_attributes: %s') % (
                len(self.genes),
                len(self.terms),
                n_gene_term,
                n_term_term,
                node_attr_names,
                edge_attr_names)
def to_ndex(self,
            ndex_user,
            ndex_pass,
            ndex_server=None,
            name=None,
            description=None,
            network=None,
            main_feature=None,
            subnet_max_term_size=None,
            visible_term_attr=None,
            layout='bubble',
            propagate='reverse',
            style=None,
            node_alias='Original_Name',
            term_2_uuid=None,
            visibility='PUBLIC',
            verbose=False):
    """Upload an Ontology object to NDEx. The Ontology can be preformatted in
    several ways including

    1. Set a name and description of the Ontology
    2. Upload a supporting gene-gene subnetwork for every term in the Ontology
    3. Propagate gene-term annotations
    4. Layout the nodes.
    5. Apply a visual style, e.g. specifying node and edge colors

    Parameters
    ----------
    ndex_user : str
        NDEx username
    ndex_pass : str
        NDEx password
    ndex_server : str
        URL of NDEx server. Default: ddot.config.ndex_server.
    name : str
        Name of Ontology
    description : str
        Description of Ontology
    network : pandas.DataFrame
        Dataframe describing a gene-gene network from which to
        create subnetworks for every term. Passed to
        Ontology.upload_subnets_ndex().
    main_feature : str
        Column of ``network`` passed to upload_subnets_ndex().
    subnet_max_term_size : int
        Only upload subnetworks for terms with at most this many genes.
    visible_term_attr : list of str
        Node attributes to expose to the viewer as 'Display:*' columns.
    layout : str
        The name of the layout algorithm for laying out the
        Ontology as a graph. Node positions are stored in the
        node attributes 'x_pos' and 'y_pos'. If None, then do not
        perform a layout.
    propagate : str
        Direction passed to Ontology.propagate() before upload
        (default 'reverse'); if None, do not propagate.
    style : ndex.networkn.NdexGraph
        The Cytoscape.js visual style on NDEx. Represented using
        CX and stored in an NdexGraph.
    node_alias : str
        Node attribute naming each term's original name; used to
        upload subnetworks only once per unique original term.
    term_2_uuid : dict
        Precomputed term -> subnetwork-UUID mapping; if given,
        subnetwork upload is skipped.
    visibility : str
        NDEx visibility (e.g. 'PUBLIC').

    Returns
    -------
    : (str, ndex.networkn.NdexGraph)
        The NDEx URL of the uploaded ontology and the uploaded graph.
    """
    if propagate is not None:
        ont = self.propagate(direction=propagate, inplace=False)
    else:
        ont = self
    if ndex_server is None:
        ndex_server = ddot.config.ndex_server
    if (network is not None) and (term_2_uuid is None):
        # Select which terms get a supporting subnetwork
        if subnet_max_term_size is None:
            terms = ont.terms
        else:
            terms = [t for t,s in zip(ont.terms, ont.term_sizes) if s <= subnet_max_term_size]
        # Only upload subnets for the unique set of the original
        # terms
        if node_alias in ont.node_attr.columns:
            orig_2_new = {a : b.index.values for a, b in ont.node_attr.loc[terms, [node_alias]].groupby(node_alias)}
            terms = [b[0] for b in orig_2_new.values()]
        term_2_uuid = ont.upload_subnets_ndex(
            network,
            main_feature,
            name,
            ndex_user,
            ndex_pass,
            ndex_server=ndex_server,
            terms=terms,
            visibility=visibility,
            verbose=verbose
        )
        # Fan the per-original-term UUIDs back out to all aliased terms
        if node_alias in ont.node_attr.columns:
            term_2_uuid = {s : term_2_uuid[orig_2_new[t][0]] for t in orig_2_new for s in orig_2_new[t] if orig_2_new[t][0] in term_2_uuid}
    elif term_2_uuid is None:
        term_2_uuid = {}
    if verbose: print('Creating NdexGraph')
    G = ont.to_NdexGraph(
        name=name,
        description=description,
        term_2_uuid=term_2_uuid,
        layout=layout,
        style=style)
    if visible_term_attr is not None:
        # Mirror selected node attributes as 'Display:*' columns for the viewer
        df = ddot.utils.nx_nodes_to_pandas(G, visible_term_attr)
        df.rename(columns=lambda x: 'Display:' + x, inplace=True)
        ddot.utils.set_node_attributes_from_pandas(G, df)
        G.set_network_attribute('Display', '|'.join(visible_term_attr))
    if verbose: print('Uploading to NDEx')
    ont_url = G.upload_to(ndex_server, ndex_user, ndex_pass, visibility=visibility)
    return ont_url, G
def to_NdexGraph(self,
                 name=None,
                 description=None,
                 term_2_uuid=None,
                 spanning_tree=True,
                 layout='bubble',
                 style=None,
                 verbose=False):
    """Formats an Ontology object into an NdexGraph object with extra node
    attributes that are accessed by the hierarchical viewer.

    Parameters
    -----------
    name : str
        Name of Ontology, as would appear if uploaded to NDEx.
    description : str
        Description of Ontology, as would appear if uploaded to NDEx.
    term_2_uuid : dict
        A dictionary mapping a term to a NDEx UUID of a gene-gene
        subnetwork of genes in that term. The UUID will be stored
        in the node attribute 'ndex:internalLink'. If uploaded to
        NDEx, then this attribute will provide a hyperlink to the
        gene-gene subnetwork when the term is clicked upon on the
        NDEx page for this ontology.

        This dictionary can be created using
        Ontology.upload_subnets_ndex(). Default: no dictionary.
    spanning_tree : bool
        Whether to compute spanning-tree edge markers (passed through
        to to_networkx).
    layout : str
        Layout the genes and terms in this Ontology. Stored in the
        node attributes 'x_pos' and 'y_pos'. If None, then do not
        perform a layout.
    style : str or None
        Only 'passthrough' (also the None default) is supported.

    Returns
    -------
    : ndex.networkn.NdexGraph
    """
    # Convert to NetworkX
    G = self.to_networkx(layout=layout, spanning_tree=spanning_tree)
    if style is None:
        style = 'passthrough'
    # Set extra attributes for passthrough visual styling
    if style=='passthrough':
        # Default node visuals: rectangles for genes, circles for terms
        for v, data in G.nodes(data=True):
            is_gene = data[self.NODETYPE_ATTR]==self.GENE_NODETYPE
            if 'Vis:Shape' not in data:
                data['Vis:Shape'] = 'Rectangle' if is_gene else 'Circle'
            if 'Vis:Fill Color' not in data:
                data['Vis:Fill Color'] = '#FFFFFF'
            if 'Vis:Border Paint' not in data:
                data['Vis:Border Paint'] = '#000000'
        # Only spanning-tree edges are drawn by default
        for u, v, data in G.edges(data=True):
            if 'Vis:Visible' not in data and 'Is_Tree_Edge' in data:
                data['Vis:Visible'] = data['Is_Tree_Edge']=='Tree'
        style = ddot.config.get_passthrough_style()
    else:
        raise Exception('Unsupported style')
    # Set links to subnetworks supporting each term
    if term_2_uuid:
        for t in self.terms:
            if t in term_2_uuid:
                G.node[t]['ndex:internalLink'] = '[%s](%s)' % (G.node[t]['Label'], term_2_uuid[t])
    # # Change Original_Name to node indices
    # name_2_idx = {data['name'] : v for v, data in G.nodes(data=True)}
    # for v, data in G.nodes(data=True):
    #     if 'Original_Name' in data and 'Hidden' in data and data['Hidden']==True:
    #         data['Original_Name'] = name_2_idx[data['Original_Name']]
    G = nx_to_NdexGraph(G)
    if name is not None:
        G.set_name(name)
    if description is not None:
        G.set_network_attribute('Description', description)
    if style:
        import ndex.beta.toolbox as toolbox
        toolbox.apply_network_as_template(G, style)
    return G
def to_cx(self,
          output=None,
          name=None,
          description=None,
          term_2_uuid=None,
          spanning_tree=True,
          layout='bubble',
          style=None):
    """Format this Ontology as CX (a JSON-like dictionary), optionally
    writing it to a file.

    Parameters
    -----------
    output : str or file-like
        Destination for the CX JSON. If None, the CX is only
        returned, not written.
    name : str
        Name of Ontology, as would appear if uploaded to NDEx.
    description : str
        Description of Ontology, as would appear if uploaded to NDEx.
    term_2_uuid : dict
        Term -> NDEx UUID mapping for supporting subnetworks; stored
        in the node attribute 'ndex:internalLink'. Can be created
        with Ontology.upload_subnets_ndex(). Default: none.
    layout : str
        Layout algorithm; node positions go in the 'x_pos'/'y_pos'
        node attributes. If None, no layout is performed.

    Returns
    -------
    : CX representation as a JSON-like dictionary
    """
    graph = self.to_NdexGraph(name=name,
                              description=description,
                              term_2_uuid=term_2_uuid,
                              spanning_tree=spanning_tree,
                              layout=layout,
                              style=style)
    cx = graph.to_cx()
    if output is None:
        return cx
    if hasattr(output, 'write'):
        # Already an open file-like object
        json.dump(cx, output)
    else:
        with io.open(output, 'w') as handle:
            json.dump(cx, handle)
    return cx
def to_graphml(self,
               output,
               layout='bubble',
               spanning_tree=True):
    """Write this Ontology to *output* in GraphML format.

    Parameters
    -----------
    output : str or file-like
        Filename or open file-like object to write to.
    layout : str
        Layout algorithm; node positions go in the 'x_pos'/'y_pos'
        node attributes. If None, no layout is performed.
    spanning_tree : bool
        Whether to compute spanning-tree edge markers.
    """
    graph = self.to_NdexGraph(spanning_tree=spanning_tree,
                              layout=layout)
    if hasattr(output, 'write'):
        # Already an open file-like object
        nx.write_graphml(graph, output)
        return
    with io.open(output, 'w') as handle:
        nx.write_graphml(graph, handle)
def _force_directed_layout(self, G):
    """Force-directed (spring) layout over the spanning-tree edges only.

    Non-tree edges are removed before layout so the tree structure
    dominates node placement; positions are then rescaled so each axis
    spans roughly 500 units.

    Parameters
    ----------
    G : networkx graph
        Must carry an 'Is_Tree_Edge' edge attribute ('Tree'/'Not_Tree').

    Returns
    -------
    : dict mapping node -> [x, y]
    """
    sub_nx = G.copy()
    sub_nx.remove_edges_from([(u, v) for u, v, attr in sub_nx.edges(data=True)
                              if attr['Is_Tree_Edge'] == 'Not_Tree'])
    pos = nx.spring_layout(sub_nx, dim=2, k=None,
                           pos=None,
                           fixed=None,
                           iterations=50,
                           weight=None,
                           scale=1.0)
    xs = np.array([p[0] for p in pos.values()])
    ys = np.array([p[1] for p in pos.values()])
    x_range = xs.max() - xs.min()
    y_range = ys.max() - ys.min()
    # Bug fix: the scale factors were transposed (x was scaled by the
    # y-range and vice versa). Also guard against a degenerate
    # zero-width range (all nodes at the same coordinate).
    x_scale = 500. / x_range if x_range > 0 else 1.0
    y_scale = 500. / y_range if y_range > 0 else 1.0
    return {node : [xy[0] * x_scale, xy[1] * y_scale] for node, xy in pos.items()}
def upload_subnets_ndex(self,
                        network,
                        main_feature,
                        name,
                        ndex_user,
                        ndex_pass,
                        ndex_server=None,
                        terms=None,
                        gene_columns=['Gene1', 'Gene2'],
                        propagate='forward',
                        visibility='PUBLIC',
                        node_attr=None,
                        node_alias='Original_Name',
                        z_score=False,
                        spring_feature=None, spring_weight=1.0,
                        edge_groups=None,
                        max_num_edges = -1,
                        verbose=False):
    """For each term in the ontology, upload a subnetwork of interactions
    between the genes in that term to NDEx.

    Parameters
    ----------
    network : pandas.DataFrame
        Gene-gene interaction table. Besides the two gene columns, every
        other column is treated as an edge feature.
    main_feature : str
        Column of `network` used as the primary edge score. Must be one of
        the feature columns.
    name : str
        Prefix for the names of all uploaded subnetworks.
    ndex_user, ndex_pass : str
        NDEx credentials.
    ndex_server : str, optional
        NDEx server URL; defaults to ddot.config.ndex_server.
    terms : list, optional
        Terms to upload subnetworks for. Default: all terms.
    gene_columns : list
        The two columns of `network` holding the interacting genes.
        NOTE(review): mutable default argument — safe only as long as it
        is never mutated; consider None + fallback.
    propagate : str or None
        Direction ('forward' or 'reverse') used to propagate gene-term
        annotations with Ontology.propagate() before uploading. If None,
        annotations are not propagated.
    visibility : str
        NDEx visibility setting, e.g. 'PUBLIC'.
    node_attr : pandas.DataFrame, optional
        Extra node attributes attached to each subnetwork.
    node_alias : str
        Column of self.node_attr used to rename genes, if present.
    z_score : bool
        If True, convert all feature columns to per-column z-scores.
    spring_feature : str, optional
        Edge feature used as the weight of a pre-computed spring layout.
    edge_groups : dict, optional
        Mapping group-name -> list of feature names, stored as the
        'edge groups' network attribute.
    max_num_edges : int
        If > 0, keep only the top-scoring edges (by `main_feature`).
    verbose : bool

    Returns
    -------
    dict
        term name -> NDEx UUID of the uploaded subnetwork. Terms with no
        matching gene pairs are skipped.
    """
    if propagate:
        ont = self.propagate(direction=propagate, inplace=False)
    else:
        ont = self
    if ndex_server is None:
        ndex_server = ddot.config.ndex_server
    # NOTE(review): this client object is created but never used below;
    # uploads go through G.upload_to() instead.
    ndex = nc.Ndex(ndex_server, ndex_user, ndex_pass)
    term_2_uuid = {}
    start = time.time()
    # Columns holding the lexicographically-ordered gene pair.
    g1, g2 = gene_columns[0] + '_lex', gene_columns[1] + '_lex'
    features = [f for f in network.columns if (f not in gene_columns)]
    assert main_feature in features, 'A main feature of the network must be specified'
    network = network[features + gene_columns].copy()
    network[gene_columns[0]] = network[gene_columns[0]].astype(str)
    network[gene_columns[1]] = network[gene_columns[1]].astype(str)
    # Keep only edges whose both endpoints are genes of this ontology.
    genes_set = set(ont.genes)
    tmp = [x in genes_set and y in genes_set
           for x, y in zip(network[gene_columns[0]], network[gene_columns[1]])]
    network = network.loc[tmp, :]
    if z_score:
        for feat in features:
            network[feat] = network[feat].astype(np.float64)
        # Normalize features into z-scores
        tmp = network[features]
        network[features] = (tmp - tmp.mean()) / tmp.std()
    # Record each feature's type and min/max range as network attributes
    # so viewers can build sliders/filters.
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    def f(x):
        # Classify a dtype as 'numeric' or 'boolean'; anything else is an error.
        if str(x) in numerics:
            return 'numeric'
        elif str(x) == 'bool':
            return 'boolean'
        else:
            raise Exception()
    feature_types = network[features].dtypes.map(f)
    # NOTE(review): np.str was removed in NumPy 1.24; use str instead.
    feature_mins = network[features].min().astype(np.str)
    feature_maxs = network[features].max().astype(np.str)
    # Cap the number of edges uploaded to NDEx (keep the top-scoring ones).
    if max_num_edges > 0:
        network.sort_values(by = main_feature, ascending=False, inplace=True)
        network = network.iloc[:max_num_edges, :]
    # Lexicographically sort gene1 and gene2 so that gene1 < gene2,
    # making (g1, g2) a canonical key for an undirected pair.
    network[g1], network[g2] = zip(
        *[(x, y) if x < y else (y, x)
          for x, y in zip(network[gene_columns[0]], network[gene_columns[1]])])
    # network_idx[(geneA, geneB)] -> positional row index in `network`.
    network_idx = {x: i for i, x in enumerate(zip(network[g1], network[g2]))}
    if terms is None:
        terms = ont.terms
    if verbose: print('Uploading %s terms' % len(terms))
    for upload_idx, t in enumerate(terms):
        start = time.time()
        if node_alias in ont.node_attr.columns:
            # NOTE(review): `genes` is referenced before assignment here —
            # this branch raises NameError whenever node_alias is a column
            # of node_attr. It probably should first compute
            # genes = [ont.genes[g] for g in ont.term_2_gene[t]].
            genes = ont.node_attr.loc[genes, node_alias].values
        else:
            genes = [ont.genes[g] for g in ont.term_2_gene[t]]
        genes.sort()
        # Rows of `network` whose (canonical) gene pair lies within term t.
        gene_pairs_idx = [network_idx[gp] for gp in itertools.combinations(genes, 2)
                          if gp in network_idx]
        # Minimum positive 'Parent weight' among t's children, if available.
        children = ont.parent_2_child[t]
        min_children_term_weights = -1
        if ('Parent weight' in ont.node_attr.columns.tolist()) and (len(children) > 0):
            children_term_weights = []
            for c in children:
                if ont.node_attr.loc[c, 'Parent weight'] > 0:
                    children_term_weights.append(ont.node_attr.loc[c, 'Parent weight'])
            if len(children_term_weights):
                children_term_weights = np.array(children_term_weights)
                min_children_term_weights = np.min(children_term_weights)
        if len(gene_pairs_idx) > 0:
            network_sub = network.iloc[gene_pairs_idx, :]
            # Keep only edges at least as strong as this term's own weight.
            network_sub = network_sub.loc[network_sub[main_feature] >= ont.node_attr.loc[t, 'Parent weight']]
            # Cap the per-term subnetwork size as well.
            if max_num_edges != None and max_num_edges > 0:
                network_sub.sort_values(by=main_feature, ascending=False, inplace=True)
                network_sub = network_sub.iloc[:max_num_edges, :]
            # NOTE(review): nx.from_pandas_dataframe and
            # nx.weakly_connected_component_subgraphs (below) exist only in
            # networkx 1.x; both were removed in networkx 2.x.
            G_nx = nx.from_pandas_dataframe(network_sub, g1, g2,
                                            edge_attr=features)
            if node_attr is not None:
                set_node_attributes_from_pandas(G_nx, node_attr)
            # Include genes of the term that have no qualifying edges.
            G_nx.add_nodes_from(list(set(genes) - set(G_nx.nodes())))
            # Annotate each gene's membership in t's children terms as
            # boolean 'Group:<child>' node attributes.
            children = ont.parent_2_child[t]
            df = pd.DataFrame({c : None for c in children}, index=genes, dtype=bool)
            for c in children:
                genes_in = [ont.genes[g] for g in ont.term_2_gene[c]]
                df.loc[genes_in, c] = True
            df.rename(columns=lambda x: 'Group:'+x, inplace=True)
            ddot.utils.set_node_attributes_from_pandas(G_nx, df)
            G = nx_to_NdexGraph(G_nx)
            G.set_name('%s supporting network for %s' % (name, t))
            G.set_network_attribute('Description', '%s supporting network for %s' % (name, t))
            G.set_network_attribute('Main Feature', main_feature)
            # Publish each feature's type and numeric range.
            for f in features:
                if (f == spring_feature) and (f != main_feature):
                    continue
                G.set_network_attribute('%s type' % f, feature_types[f])
                if feature_types[f] == 'numeric':
                    G.set_network_attribute('%s min' % f, feature_mins[f])
                    G.set_network_attribute('%s max' % f, feature_maxs[f])
            G.set_network_attribute('Group', '|'.join(children))
            # Default score threshold shown by viewers for this subnetwork.
            G.set_network_attribute('Main Feature Default Cutoff', float(ont.node_attr.loc[t, 'Parent weight']))
            G.set_network_attribute('Parent weight', float(ont.node_attr.loc[t, 'Parent weight']))
            if min_children_term_weights > 0:
                G.set_network_attribute('Children weight', '|'.join(['{:.3f}'.format(w) for w in children_term_weights]))
            if isinstance(edge_groups, dict) and (len(edge_groups.keys()) > 0):
                # Encode edge groups as 'name,feat1,feat2|name2,...'.
                edge_group_string = []
                for k, vs in edge_groups.items():
                    vs.sort()
                    edge_group_string.append(','.join([k] + vs))
                edge_group_string = '|'.join(edge_group_string)
                G.set_network_attribute('edge groups', edge_group_string)
            # Keep only the largest weakly-connected component.
            G = max(nx.weakly_connected_component_subgraphs(G), key=len)
            # Optionally pre-compute a spring layout weighted by spring_feature.
            if spring_feature != None:
                gsim = layouts._create_simple_graph(G)
                pos = nx.spring_layout(gsim, scale=200 * math.sqrt(gsim.number_of_nodes()), weight=spring_feature)
                G.pos = pos
            start_upload = time.time()
            ndex_url = G.upload_to(ndex_server, ndex_user, ndex_pass, visibility=visibility)
            term_2_uuid[t] = parse_ndex_uuid(ndex_url)
            upload_time = time.time() - start_upload
            if verbose:
                print(upload_idx,
                      'Term:', t,
                      'Gene pairs:', len(G_nx.edges()),
                      'Genes:', len(genes),
                      'Time:', round(time.time() - start, 4),
                      'Upload time:', round(upload_time, 4),
                      'NDEx URL:', ndex_url)
        else:
            if verbose:
                print(upload_idx, 'No data provided for gene pairs in Term: %s' % t)
    return term_2_uuid
def get_best_ancestors(self, node_order=None, verbose=False, include_genes=True):
    """Compute the 'best' common ancestor for every pair of terms. 'Best' is
    defined by a ranking of terms. For example, if terms are ranked by
    size, from smallest to largest, then the smallest common ancestor is
    calculated.

    Parameters
    ----------
    node_order : list, optional
        Terms ordered by rank, best first. Default: terms sorted from
        smallest to largest by self.term_sizes.
    verbose : bool
        Print progress.
    include_genes : bool
        Include genes as graph nodes (they then appear in the returned
        matrix/labels as well).

    Returns
    -------
    ancestors : np.ndarray
        ancestors[a, b] = the best common ancestor of nodes a and b,
        encoded as a 0-based index into self.terms.
    nodes : list
        Row/column labels (rows and columns are in the same order).
    """
    ont = self.propagate(direction='reverse', inplace=False)
    graph = ont.to_igraph(include_genes=include_genes, spanning_tree=False)
    if node_order is None:
        # By default, sort from smallest to largest terms
        node_order = [self.terms[t] for t in np.argsort(ont.term_sizes)]
    # d[i, j] == 1 iff node j is reachable from node i along 'out' edges,
    # i.e. j is an ancestor of i (a node is its own ancestor here).
    d = np.int8(np.isfinite(np.array(graph.shortest_paths(graph.vs, graph.vs, mode='out'), order='C')))
    ancestor_matrix = np.zeros(d.shape, dtype=np.int32)
    ancestor_matrix.fill(-1)  # -1 marks "not yet assigned"
    if verbose: time_print('Iterating:')
    # Visit terms best-first; the first term that covers a pair of
    # descendants becomes their recorded best common ancestor.
    for t in node_order:
        i = graph.vs.find(t).index
        t_i = self.terms_index[t]
        # Note: includes self as a child
        children = np.where(d[:, i] == 1)[0]
        # For those descendants without a computed ancestor yet, set it to this term
        lca_sub = ancestor_matrix[children.reshape(-1, 1), children]
        lca_sub[lca_sub == -1] = t_i
        ancestor_matrix[children.reshape(-1, 1), children] = lca_sub
    # Sanity checks: the relation is symmetric and total (a single root
    # guarantees every pair has some common ancestor).
    assert (ancestor_matrix.T == ancestor_matrix).all()
    assert (-1 == ancestor_matrix).sum() == 0, 'The ontology may have more than one root'
    return ancestor_matrix, graph.vs['name']
@classmethod
def _make_tree_igraph(self,
                      graph=None,
                      method='priority',
                      edge_name='smallest_parent',
                      parent_priority=None, edge_priority=None, default_priority=None, optim='max'):
    """Returns a copy of the graph with a new boolean edge attribute
    (`edge_name`) marking a spanning tree: for every node with parents,
    exactly one child->parent edge is marked True.

    NOTE(review): despite the @classmethod decorator the first parameter
    is named `self`; it receives the class object.

    Parameters
    ----------
    graph : igraph.Graph, optional
        Defaults to self.to_igraph(include_genes=False, spanning_tree=True).
    method : str
        Only 'priority' is supported.
    edge_name : str
        Name of the boolean edge attribute to write.
    parent_priority : dict-like, optional
        Priority per parent vertex index. Exactly one of parent_priority /
        edge_priority must be given.
    edge_priority : dict, optional
        Priority per edge id; missing edges fall back to default_priority
        (required in that case).
    optim : 'max' or 'min'
        Whether the highest- or lowest-priority parent wins.
    """
    if graph is None:
        graph = self.to_igraph(include_genes=False, spanning_tree=True)
    if method=='priority':
        # Exactly one of the two priority schemes must be supplied.
        assert 1 == (parent_priority is not None) + (edge_priority is not None)
        if edge_priority is not None: assert default_priority is not None
        # Map the 'min'/'max' keyword onto the corresponding builtin.
        if optim=='min': optim=min
        if optim=='max': optim=max
        graph.es[edge_name] = False
        for v in graph.vs:
            parents = graph.neighbors(v.index, mode='out')
            if len(parents) > 0:
                """Choose the parent with the highest valued priority"""
                if parent_priority is not None:
                    small_parent = optim(parents, key=lambda p: parent_priority[p])
                elif edge_priority is not None:
                    small_parent = optim(parents, key=lambda p: edge_priority.get(graph.get_eid(v.index, p), default_priority))
                graph.es[graph.get_eid(v.index, small_parent)][edge_name] = True
    else:
        raise Exception('Method not supported')
    return graph
def to_pickle(self, file, compression='infer'):
    """Serialize this Ontology to *file* with the Python pickle protocol.

    Parameters
    ----------
    file : str or file-like
        Destination path (or buffer) for the pickle.
    compression : str
        Passed through to pandas; 'infer' picks compression from the
        file extension.
    """
    # pd.to_pickle is the public alias of pandas.io.pickle.to_pickle.
    pd.to_pickle(self, file, compression=compression)
@classmethod
def read_pickle(cls, file, compression='infer'):
    """Load a pickled Ontology (or any pickled object) from *file*.

    Parameters
    ----------
    file : str or file-like
        Source path (or buffer) of the pickle.
    compression : str
        Passed through to pandas; 'infer' picks compression from the
        file extension.

    Returns
    -------
    The unpickled object.
    """
    # pd.read_pickle is the public alias of pandas.io.pickle.read_pickle.
    return pd.read_pickle(file, compression=compression)
def __repr__(self):
    """Use the human-readable summary as the repr of an Ontology."""
    return self.summary()
def __str__(self):
    """str() mirrors repr(): both show the ontology summary."""
    return self.summary()
# NOTE(review): a stray '|' character stood here (a syntax error); it marks the
# boundary between two concatenated copies of this module.
from __future__ import absolute_import, print_function, division
import itertools, multiprocessing, logging, os, collections, random, math, sys, time
from itertools import groupby, combinations
from operator import *
from collections import Counter
import tempfile
from subprocess import Popen, PIPE, STDOUT
import inspect
import shlex
import shutil
import io
from io import StringIO
import json
import datetime
import numpy as np
import pandas as pd
import pandas.io.pickle
import networkx as nx
import igraph
import scipy, scipy.sparse
from scipy.sparse import csr_matrix, coo_matrix
from scipy.stats import hypergeom
import ndex.client as nc
from ndex.networkn import NdexGraph
import ndex.beta.layouts as layouts
import ddot
import ddot.config
from ddot.utils import time_print, set_node_attributes_from_pandas, set_edge_attributes_from_pandas, nx_to_NdexGraph, NdexGraph_to_nx, parse_ndex_uuid, parse_ndex_server, make_index, update_nx_with_alignment, bubble_layout_nx, split_indices_chunk, invert_dict, make_network_public, nx_edges_to_pandas, nx_nodes_to_pandas, ig_edges_to_pandas, ig_nodes_to_pandas, melt_square, nx_set_tree_edges, gridify
def _collapse_node(g,
                   v,
                   edge_filter=None,
                   use_v_name=False,
                   combine_attrs=None,
                   default_attr=None,
                   verbose=True,
                   fast_collapse=False,
                   delete=True):
    """Collapses a node in a Graph (igraph package) while preserving
    long-range hierarchical relations between descendants and
    ancestral nodes: every (child of v) -> (parent of v) pair gets a
    direct edge before v is (optionally) deleted.

    Parameters
    ----------
    g : igraph.Graph
        Modified in place (and also returned).
    v : int or str
        Vertex index, or vertex name if use_v_name is True.
    edge_filter : callable, optional
        Predicate on edges; only incident edges passing it are bridged
        (ignored when fast_collapse is True).
    use_v_name : bool
        Interpret v as the vertex 'name' attribute.
    combine_attrs : dict, optional
        attr name -> f(e_in, e_out, e) used to combine attributes of the
        two bridged edges onto the new edge.
    default_attr : unused
        NOTE(review): accepted but never read in this function.
    fast_collapse : bool
        Add all bridging edges in one batch, ignoring edge attributes.
    delete : bool
        Remove v from the graph after bridging.

    Returns
    -------
    igraph.Graph
        The same graph object, mutated.
    """
    if use_v_name:
        assert isinstance(v, str)
        v = g.vs.find(name_eq=v).index
    try:
        g.vs[v]
    except:
        raise Exception("Can't find vertex %s in graph. Consider setting use_v_name=True" % v)
    if fast_collapse:
        parents = g.neighbors(v, mode='out')
        children = g.neighbors(v, mode='in')
        if len(parents) > 0 and len(children) > 0:
            # A faster collapse that adds all new edges
            # simultaneously. Ignores edge attributes
            new_edges = [(c, p) for p in parents for c in children]
            # Keep only pairs not already connected (get_eids returns -1).
            new_edges = [x for x, y in zip(new_edges, g.get_eids(new_edges, error=False)) if y == -1]
            g.add_edges(new_edges)
    else:
        # Track, per edge, how many collapsed nodes it spans and which.
        g.es['collapsed_length'] = 0
        g.es['collapsed_terms'] = [[] for x in g.es]
        in_edges = g.es[g.incident(v, mode='in')]
        out_edges = g.es[g.incident(v, mode='out')]
        if edge_filter is not None:
            in_edges = [e for e in in_edges if edge_filter(e)]
            out_edges = [e for e in out_edges if edge_filter(e)]
        for e_in in in_edges:
            for e_out in out_edges:
                in_neigh, out_neigh = e_in.source, e_out.target
                # Only add an edge if it doesn't already exist
                if g[in_neigh, out_neigh] == 0:
                    g.add_edge(in_neigh, out_neigh)
                    e = g.es[g.get_eid(in_neigh, out_neigh)]
                    if combine_attrs is not None:
                        # Initialize combined attributes on the new edge.
                        for key in combine_attrs: e[key] = None
                e = g.es[g.get_eid(in_neigh, out_neigh)]
                # Merge attributes of the two bridged edges onto e.
                if combine_attrs is not None:
                    for key in combine_attrs:
                        e[key] = combine_attrs[key](e_in, e_out, e)
                        if verbose and key=='triangle_edge_priority':
                            print('Setting',
                                  key,
                                  g.vs[in_neigh]['name'],
                                  g.vs[out_neigh]['name'],
                                  'to',
                                  combine_attrs[key](e_in, e_out, e),
                                  (e_in[key], e_out[key]))
                e['collapsed_length'] = e_in['collapsed_length'] + e_out['collapsed_length']
                e['collapsed_terms'] = e_in['collapsed_terms'] + [g.vs[v]['name']] + e_out['collapsed_terms']
    if delete:
        g.delete_vertices(v)
    return g
def read_alignment_file(f, source='Term_1'):
    """Parses an alignment file created from alignOntology's calculateFDRs script.

    Parameters
    ----------
    f : str or file-like
        Filename (or buffer) of the alignment file.
    source : str
        Indicates which ontology will be the index of the returned
        pandas.DataFrame. Must be either 'Term_1' (first ontology) or
        'Term_2' (second ontology).

    Returns
    -------
    pandas.DataFrame
        Columns: 'Term', 'Similarity', 'FDR', and 'Size'. The index holds
        the names of terms in the "source" ontology; the 'Term' column
        holds the matched term from the other ontology.
    """
    assert source in ('Term_1', 'Term_2'), "source must be 'Term_1' or 'Term_2'"
    # Five tab-separated columns in the input file:
    # 1) Term from first "computed" ontology
    # 2) Term from second "reference" ontology
    # 3) Similarity value
    # 4) FDR
    # 5) Size of the term in the first ontology
    # pd.read_table was deprecated and removed in pandas 2.0;
    # read_csv(sep='\t') is the exact equivalent.
    df = pd.read_csv(f,
                     sep='\t',
                     names=['Term_1', 'Term_2', 'Similarity', 'FDR', 'Size'],
                     dtype={'Term_1': str,
                            'Term_2': str,
                            'Similarity': np.float64,
                            'FDR': np.float64,
                            'Size': np.int64},
                     header=None)
    # The non-source column becomes the generic 'Term' column; the source
    # column becomes the index (also labeled 'Term').
    target = 'Term_2' if source == 'Term_1' else 'Term_1'
    df.rename(columns={target: 'Term'}, inplace=True)
    df.set_index(source, inplace=True)
    df.index.rename('Term', inplace=True)
    return df
def align_hierarchies(hier1,
                      hier2,
                      iterations,
                      threads,
                      update_hier1=False,
                      update_hier2=False,
                      calculateFDRs=None,
                      mutual_collapse=True,
                      output=None,
                      verbose=False):
    """Align two hierarchies with alignOntology's `calculateFDRs` external
    program and return the resulting term-to-term alignment.

    Parameters
    ----------
    hier1, hier2 : Ontology or str
        Ontology objects or paths to CLIXO-format ontology files.
    iterations : int
        Number of null-model iterations used by calculateFDRs.
    threads : int or None
        Worker threads for calculateFDRs; None = all CPUs.
    update_hier1, update_hier2 : bool or iterable
        If truthy, write alignment results (prefixed 'Aligned_') into the
        other ontology's node attributes; an iterable selects which
        node-attribute columns to import.
    calculateFDRs : str, optional
        Path to the calculateFDRs binary; defaults to the copy bundled
        with ddot.
    mutual_collapse : bool
        Run Ontology.mutual_collapse on both hierarchies first.
    output : str, optional
        Path for the raw alignment file; a temporary file by default.

    Returns
    -------
    pandas.DataFrame
        Columns ['Term', 'Similarity', 'FDR'], indexed by terms of hier1.
        Empty if the two hierarchies share no genes.
    """
    if output is None:
        # Re-enter with a temporary file as the output path.
        with tempfile.NamedTemporaryFile('w', delete=True) as output_file:
            return align_hierarchies(hier1, hier2, iterations, threads,
                                     update_hier1=update_hier1, update_hier2=update_hier2,
                                     mutual_collapse=mutual_collapse,
                                     output=output_file.name,
                                     calculateFDRs=calculateFDRs,
                                     verbose=verbose)
    common_genes = set(hier1.genes) & set(hier2.genes)
    # Keep the originals: attribute updates at the end go to these, not to
    # the collapsed/stripped working copies.
    hier1_orig, hier2_orig = hier1, hier2
    if len(common_genes) > 0:
        if mutual_collapse:
            hier1, hier2 = Ontology.mutual_collapse(hier1, hier2, verbose=verbose)
        # Strip attributes and fully propagate annotations before writing
        # the CLIXO-format files consumed by calculateFDRs.
        hier1.clear_node_attr()
        hier1.clear_edge_attr()
        hier2.clear_node_attr()
        hier2.clear_edge_attr()
        hier1.propagate('reverse', inplace=True)
        hier2.propagate('reverse', inplace=True)
        def to_file(hier):
            # Dump an Ontology to a temp file; pass filenames through.
            if isinstance(hier, Ontology):
                with tempfile.NamedTemporaryFile('w', delete=False) as f:
                    hier.to_table(f, clixo_format=True)
                    hier = f.name
            else:
                # NOTE(review): `file` is not a builtin in Python 3 — this
                # isinstance check raises NameError; it should just test
                # os.path.exists(hier).
                assert isinstance(hier, file) or os.path.exists(hier)
            return hier
        hier1 = to_file(hier1)
        hier2 = to_file(hier2)
        if calculateFDRs is None:
            # Default to the calculateFDRs binary bundled with ddot.
            top_level = os.path.dirname(os.path.abspath(inspect.getfile(ddot)))
            calculateFDRs = os.path.join(top_level, 'alignOntology', 'calculateFDRs')
        assert os.path.isfile(calculateFDRs)
        if threads is None:
            import multiprocessing
            threads = multiprocessing.cpu_count()
        output_dir = tempfile.mkdtemp(prefix='tmp')
        cmd = '{5} {0} {1} 0.05 criss_cross {2} {3} {4} gene'.format(
            hier1, hier2, output_dir, iterations, threads, calculateFDRs)
        print('Alignment command:', cmd)
        p = Popen(shlex.split(cmd), shell=False)
        try:
            p.wait()
            shutil.copy(os.path.join(output_dir, 'alignments_FDR_0.1_t_0.1'), output)
        finally:
            # Always clean up the scratch directory and kill a straggling process.
            if os.path.isdir(output_dir):
                shutil.rmtree(output_dir)
            if p.poll() is None:
                if verbose: time_print('Killing alignment process %s. Output: %s' % (p.pid, output))
                p.kill()  # Kill the process
        align1 = read_alignment_file(output)[['Term', 'Similarity', 'FDR']]
    else:
        # No shared genes: nothing to align.
        align1 = pd.DataFrame(columns=['Term', 'Similarity', 'FDR'])
    # align2 is the same mapping viewed from hier2's side (index swapped
    # with the 'Term' column).
    align2 = align1.copy()
    align2.index, align2['Term'] = align2['Term'].values.copy(), align2.index.values.copy()
    append_prefix = lambda x: 'Aligned_%s' % x
    if update_hier1:
        # Import hier2's node attributes (for the aligned terms) into hier1.
        if hasattr(update_hier1, '__iter__'):
            node_attr = hier2_orig.node_attr[update_hier1]
        else:
            node_attr = hier2_orig.node_attr
        hier2_import = pd.merge(pd.DataFrame(index=align2.index), node_attr, left_index=True, right_index=True, how='left')
        assert (hier2_import.index == align2.index).all()
        # Change index to terms in hier1
        hier2_import.index = align2['Term'].copy()
        hier2_import.rename(columns=append_prefix, inplace=True)
    if update_hier2:
        # Import hier1's node attributes (for the aligned terms) into hier2.
        if hasattr(update_hier2, '__iter__'):
            node_attr = hier1_orig.node_attr[update_hier2]
        else:
            node_attr = hier1_orig.node_attr
        hier1_import = pd.merge(pd.DataFrame(index=align1.index), node_attr, left_index=True, right_index=True, how='left')
        assert (hier1_import.index == align1.index).all()
        # Change index to terms in hier2
        hier1_import.index = align1['Term'].copy()
        hier1_import.rename(columns=append_prefix, inplace=True)
    if update_hier1:
        hier1_orig.update_node_attr(align1.rename(columns=append_prefix))
        hier1_orig.update_node_attr(hier2_import)
    if update_hier2:
        hier2_orig.update_node_attr(align2.rename(columns=append_prefix))
        hier2_orig.update_node_attr(hier1_import)
    return align1
def parse_obo(obo,
              output_file=None,
              id2name_file=None,
              id2namespace_file=None,
              alt_id_file=None):
    """Parses an OBO file and writes the results into several tables.

    Parameters
    ----------
    obo : str
        Filename of OBO file
    output_file : str
        Filename to write table that describes the ontology's
        hierarchical structure. The table has four columns: (1) parent
        term, (2) child term, (3) relation type (e.g. "is_a" or
        "part_of"), (4) namespace of relation
        (e.g. "biological_process" or "cellular_component")
    id2name_file : str
        Filename to write table of term descriptions. The table has
        two columns: (1) Ontology term (e.g. "GO:0000030"), (2)
        description (e.g. "mannosyltransferase activity")
    id2namespace_file : str
        Filename to write table of term namespaces. The table has two
        columns: (1) Ontology term (e.g. "GO:0000030"), (2) namespace
        of the term (e.g. "biological_process")
    alt_id_file : str
        Filename to write table of alternative Term IDs that are
        synonyms and refer to the same term. The table has two
        columns: (1) Primary Term ID, (2) Alternative Term ID
    """
    ## Keywords that screw up parsing:
    # import, is_anonymous, intersection_of, union_of
    ## Relations
    # 'is_a:'
    # 'relationship: has_part'              # Not in filtered GO
    # 'relationship: occurs_in'             # Not in filtered GO
    # 'relationship: part_of'
    # 'relationship: positively_regulates'
    # 'relationship: negatively_regulates'
    # 'relationship: regulates'
    # 'relationship: results_in'            # Not in filtered GO
    stanza, edges = [], []
    id2name = dict()
    id2namespace = dict()
    alt_id = dict()
    in_term_stanza = False
    default_namespace_exists = False
    for line in io.open(obo).read().splitlines():
        line = line.split('!')[0].strip()  # Remove comments
        if len(line) > 0 and line[0] == '[' and line[-1] == ']':
            # Stanza header, e.g. '[Term]' or '[Typedef]'.
            # Add last stanza if it was a term stanza. Include namespace.
            # NOTE(review): the FINAL stanza of the file is never flushed
            # into `edges` — there is no flush after the loop ends.
            if in_term_stanza:
                edges.extend(x + (namespace, ) for x in stanza)
            # Start new term stanza
            stanza = []
            # Set the default namespace, if it exists
            if default_namespace_exists:
                namespace = default_namespace
            # In a term stanza or not
            in_term_stanza = line == '[Term]'
            name = None
        #if 'alt_id:' in line: assert False
        if 'id:' == line[:3]:
            curr_term = line.split('id:')[1].strip()
        elif 'alt_id:' in line:
            alt_term = line.split('alt_id:')[1].strip()
            if curr_term in alt_id: alt_id[curr_term].append(alt_term)
            else: alt_id[curr_term] = [alt_term]
            # NOTE(review): assumes the 'name:' line precedes 'alt_id:' in
            # the stanza; otherwise `name` is still None here.
            id2name[alt_term] = name
        elif 'name:' in line:
            name = line.split('name:')[1].strip()
            assert not curr_term in id2name
            id2name[curr_term] = name
        elif 'is_a:' in line:
            parent = line.split('is_a:')[1].strip()
            stanza.append((parent, curr_term, 'is_a'))
        elif 'relationship:' in line:
            # e.g. 'relationship: part_of GO:0005575'
            line = line.split('relationship:')[1].strip().split()
            if len(line) != 2: print(line)
            assert len(line) == 2
            relation, parent = line
            stanza.append((parent, curr_term, relation))
        elif 'namespace:' == line[:10]:
            namespace = line.split('namespace:')[1].strip()
            assert not curr_term in id2namespace
            id2namespace[curr_term] = namespace
        elif 'default-namespace:' == line[:18]:
            namespace = line.split('default-namespace:')[1].strip()
            default_namespace_exists = True
            default_namespace = namespace
    # Write the four output tables (tab-separated, no header).
    pd.DataFrame(edges).to_csv(output_file, header=False, index=False, sep='\t')
    pd.Series(id2name).to_csv(id2name_file, sep='\t')
    pd.Series(id2namespace).to_csv(id2namespace_file, sep='\t')
    # alt_id is inverted into (primary, alternative) rows.
    pd.Series(dict([(a, c) for a, b in alt_id.items() for c in b])).to_csv(alt_id_file, sep='\t')
def parse_gaf(gaf):
    """Read gene-term annotations from the GAF file format:
    http://geneontology.org/page/go-annotation-file-gaf-format-21

    Parameters
    ----------
    gaf : str or file-like
        Filename (or buffer) of GAF file.

    Returns
    -------
    pandas.DataFrame
        One row per annotation, with the 17 standard GAF columns.
        Annotations with a 'NOT' qualifier are removed.
    """
    gaf_columns = ['DB', 'DB Object ID', 'DB Object Symbol',
                   'Qualifier', 'GO ID', 'DB:Reference',
                   'Evidence Code', 'With (or) From', 'Aspect',
                   'DB Object Name', 'DB Object Synonym',
                   'DB Object Type', 'Taxon', 'Date',
                   'Assigned By', 'Annotation Extension',
                   'Gene Product Form ID']
    # pd.read_table was deprecated and removed in pandas 2.0;
    # read_csv(sep='\t') is the exact equivalent. Lines starting with '!'
    # are GAF header/comment lines.
    df = pd.read_csv(gaf, sep='\t', header=None, comment='!', names=gaf_columns)
    # Remove annotations that have a NOT qualifier (negative annotations).
    df = df.loc[df['Qualifier'] != 'NOT', :]
    return df
class Ontology(object):
    """A Python representation for constructing, analyzing, and
    manipulating the hierarchical structure of ontologies.

    An Ontology object contains the following attributes for
    representing the hierarchical structure. Do not directly modify
    these attributes.

    Attributes
    ----------
    genes : list
        Names of genes
    terms : list
        Names of terms
    gene_2_term : dict
        gene_2_term[<gene>] --> list of terms connected to
        <gene>. Terms are represented as their 0-based index in
        self.terms.
    term_2_gene : dict
        term_2_gene[<term>] --> list of genes connected to
        <term>. Genes are represented as their 0-based index in
        self.genes.
    child_2_parent : dict
        child_2_parent[<child>] --> list of the parent terms of <child>
    parent_2_child : dict
        parent_2_child[<parent>] --> list of the children terms of <parent>
    term_sizes : list
        A list of every term's size, i.e. the number of unique genes
        that it and its descendant terms contain. This list has the
        same order as self.terms. It holds that for every i,
        `term_sizes[i] = len(self.term_2_gene[self.terms[i]])`
    """

    # Node/edge attribute conventions used when exporting to or importing
    # from NetworkX / NDEx, so genes vs. terms and the two edge categories
    # can be told apart.
    NODETYPE_ATTR = 'NodeType'
    GENE_NODETYPE = 'Gene'
    TERM_NODETYPE = 'Term'
    EDGETYPE_ATTR = 'EdgeType'
    GENE_TERM_EDGETYPE = 'Gene-Term'
    CHILD_PARENT_EDGETYPE = 'Child-Parent'
def __init__(self,
             hierarchy,
             mapping,
             edge_attr=None,
             node_attr=None,
             parent_child=False,
             add_root_name=None,
             propagate=None,
             ignore_orphan_terms=False,
             verbose=True,
             **kwargs):
    """Construct an Ontology object.

    Parameters
    ----------
    hierarchy : list, tuple
        Iterable of (child term, parent term). E.g. list of 2-tuples
    mapping : list, tuple
        Iterable of (gene, term) pairs. E.g. list of 2-tuples
    edge_attr : pandas.DataFrame
        Meta-data describing (child_term, parent_term) pairs. The index
        must be a 2-level pandas.MultiIndex: (child, parent).
    node_attr : pandas.DataFrame
        Meta-data describing genes/terms; single-level index of node names.
    parent_child : bool
        If True, then the definitions of <hierarchy> and <mapping>
        are reversed so that they iterate over (parent term, child
        term) and (term, gene) pairs.
    propagate : None, str
        The direction ('forward' or 'reverse') to propagate
        gene-term annotations up the hierarchy with
        Ontology.propagate(). If None, then don't propagate annotations.
    add_root_name : str
        The name of an artificial root. If there are multiple
        roots in the ontology, then they are joined into one root
        with this name. Default: Don't create this root.
    ignore_orphan_terms : bool
        Drop terms that are connected to genes but not to any other term.
    **kwargs
        empty=True skips all construction (used internally to build a
        blank instance).
    """
    # Internal escape hatch: build a completely empty object.
    if 'empty' in kwargs and kwargs['empty'] is True:
        return
    if parent_child:
        # Flip to the canonical (child, parent) / (gene, term) direction.
        hierarchy = [(x[1], x[0]) for x in hierarchy]
        mapping = [(x[1], x[0]) for x in mapping]
    # Cast all node names to strings
    hierarchy = [(str(x[0]), str(x[1])) for x in hierarchy]
    mapping = [(str(x[0]), str(x[1])) for x in mapping]
    ## Read term-to-term edges
    # parent_2_child[<term_name>] --> list of <term_name>'s children terms
    self.parent_2_child = {r: [p[0] for p in q] for r, q in \
                           itertools.groupby(sorted(hierarchy,
                                                    key=lambda a: a[1]),
                                             key=lambda a: a[1])}
    ## Read gene-to-term edges
    # self.gene_2_term[<gene_name>] --> list of terms that <gene_name> is mapped to
    self.gene_2_term = {key: set([a[1] for a in group]) for key, group in \
                        itertools.groupby(sorted(mapping,
                                                 key=lambda a: a[0]),
                                          key=lambda a: a[0])}
    ## Compare the sets of terms seen in the hierarchy (terms_A) versus
    ## in the gene mapping (terms_B).
    terms_A = set.union(set(self.parent_2_child.keys()),
                        *[set(x) for x in self.parent_2_child.values()])
    if len(self.gene_2_term) > 0:
        terms_B = set.union(*self.gene_2_term.values())
    else:
        terms_B = set([])
    if verbose and ignore_orphan_terms and len(terms_B - terms_A) > 0:
        print('WARNING: Ignoring {} terms are connected to genes but not to other terms'.format(len(terms_B - terms_A)))
    if ignore_orphan_terms:
        self.terms = sorted(terms_A)
    else:
        self.terms = sorted(terms_A | terms_B)
    self.genes = sorted(self.gene_2_term.keys())
    if add_root_name is not None:
        root_list = self.get_roots()
        if len(root_list) > 1:
            # Join the multiple roots under one artificial super-root.
            print('Unifying %s roots into one super-root' % len(root_list))
            self.parent_2_child[add_root_name] = root_list
            self.terms.append(add_root_name)
    ## terms_index[<term_name>] --> index in self.terms
    self.terms_index = make_index(self.terms)
    ## self.genes_index[<gene_name>] --> index in self.genes
    self.genes_index = make_index(self.genes)
    ## Convert self.gene_2_term to list of term indices rather than term names
    for k, v in self.gene_2_term.items():
        self.gene_2_term[k] = [self.terms_index[x] for x in self.gene_2_term[k] if x in self.terms_index]
    if node_attr is None:
        self.clear_node_attr()
    else:
        # Node attributes must have a flat index of node names.
        assert node_attr.index.nlevels == 1
        if node_attr.index.name != 'Node':
            node_attr.index.name = 'Node'
        self.node_attr = node_attr
    if edge_attr is None:
        self.clear_edge_attr()
    else:
        # Edge attributes must have a 2-level (Child, Parent) index.
        assert edge_attr.index.nlevels == 2
        edge_attr.index.names = ['Child', 'Parent']
        self.edge_attr = edge_attr
    # Build the derived lookup tables (child_2_parent, term_2_gene, ...).
    self._update_fields()
    if propagate:
        self.propagate(direction=propagate, inplace=True)
        self._update_fields()
    self._check_valid()
def _update_fields(self, reset_term_sizes=True):
    """Recompute the derived lookup tables after the hierarchy changed.

    Rebuilds child_2_parent and term_2_gene from the primary structures,
    optionally invalidates the cached term sizes, and guarantees that
    every term has an (possibly empty) entry in both parent/child maps.
    """
    self.child_2_parent = self._get_child_2_parent()
    self.term_2_gene = self._get_term_2_gene()
    if reset_term_sizes:
        # Cached sizes are stale; recompute lazily on next access.
        self._term_sizes = None
    for term in self.terms:
        self.parent_2_child.setdefault(term, [])
        self.child_2_parent.setdefault(term, [])
def add_root(self, root_name, inplace=False):
    """Check if there is a single unifying root term of the ontology. If
    not, then identify the multiple roots and join them under an
    artificial root named `root_name`.

    Parameters
    ----------
    root_name : str
        Name of the artificial root; must not already be a term.
    inplace : bool
        Modify this Ontology instead of a copy.

    Returns
    -------
    Ontology
        self (if inplace) or the modified copy.
    """
    if inplace:
        ont = self
    else:
        ont = self.copy()
    assert root_name not in ont.terms
    root_list = ont.get_roots()
    if len(root_list) > 1:
        print('Unifying %s roots into one super-root' % len(root_list))
        ont.parent_2_child[root_name] = root_list
        ont.terms.append(root_name)
        # Ordering is critical here: the new index is built from the
        # SORTED term list while ont.terms is still in the OLD order, so
        # that old term indices in gene_2_term can be translated via
        # ont.terms[t] before the list itself is re-sorted.
        ont.terms_index = make_index(sorted(ont.terms))
        for g, t_list in ont.gene_2_term.items():
            ont.gene_2_term[g] = [ont.terms_index[ont.terms[t]] for t in t_list]
        ont.terms.sort()
        ont._update_fields()
    return ont
def _get_child_2_parent(self):
"""
Converts self.parent_2_child to child_2_parent
# child_2_parent[<term_name>] --> list of <term_name>'s parent term names
"""
cp_pairs = []
for p, c_list in self.parent_2_child.items():
for c in c_list:
cp_pairs.append((c,p))
first = lambda a: a[0]
cp_pairs.sort(key=first)
child_2_parent = {
r: [p[1] for p in q] for r, q in
itertools.groupby(cp_pairs, key=first)
}
for t in self.terms:
if t not in child_2_parent:
child_2_parent[t] = []
return child_2_parent
def clear_node_attr(self):
"""Resets the node attributes to be empty."""
self.node_attr = pd.DataFrame()
self.node_attr.index.name = 'Node'
def clear_edge_attr(self):
"""Resets the edge attributes to be empty."""
self.edge_attr = pd.DataFrame()
self.edge_attr.index = pd.MultiIndex(levels=[[],[]],
codes=[[],[]],
names=['Child', 'Parent'])
def update_node_attr(self, node_attr):
"""Update existing node attributes or add new node attributes.
Parameters
----------
node_attr : pandas.DataFrame
Dataframe where index are the names of genes or terms and
where the columns are the names of node attributes.
"""
####
# TODO : make sure that renaming/deleting/collapsing of genes and columns respect the node_attr and edge_attr
# Filter for genes and terms in the ontology
nodes = set(self.genes) | set(self.terms)
node_attr = node_attr.loc[[x for x in node_attr.index if x in nodes], :]
assert node_attr.index.duplicated().sum() == 0
# Update index to the union of current and new node_attr
self.node_attr = self.node_attr.reindex(self.node_attr.index.union(node_attr.index))
# Update columns
for col in node_attr.columns:
self.node_attr.loc[node_attr.index, col] = node_attr[col]
def update_edge_attr(self, edge_attr):
"""Update existing edge attributes or add new edge attributes.
Parameters
----------
edge_attr : pandas.DataFrame
Dataframe where the index is a MultiIndex represents edges
in the Ontology, such that the first level is the name of
a gene or child term, and the second level is the name of
a parent term. Columns are the names of edge attributes.
"""
# Filter for genes and terms in the ontology
edges = []
for child, parent_list in self.child_2_parent.items():
for parent in parent_list:
edges.append((child, parent))
for gene, term_list in self.gene_2_term.items():
for term in term_list:
edges.append((gene, self.terms[term]))
edges = set(edges)
edge_attr = edge_attr.loc[[x for x in edge_attr.index if x in edges], :]
assert edge_attr.index.duplicated().sum() == 0
# Update index
self.edge_attr = self.edge_attr.reindex(self.edge_attr.index.union(edge_attr.index))
# Update values for overlapping columns
for col in edge_attr.columns:
self.edge_attr.loc[edge_attr.index, col] = edge_attr[col].values
def get_roots(self):
    """Return a sorted list of the root term(s), i.e. terms that are
    not the child of any other term.

    Returns
    -------
    : list
    """
    # Every term that appears as somebody's child cannot be a root.
    children = {c for kids in self.parent_2_child.values() for c in kids}
    return sorted(set(self.terms) - children)
def _make_dummy(self, tree_edges=None):
    """For each term T with children, create a dummy term that indirectly
    connects T's genes to T. For example, if g1 and g2 are in T, then a
    new term dummy_T is created so that the new ontology consists of

       g1 --> T_dummy
       g2 --> T_dummy
       T_dummy --> T

    Parameters
    ----------
    tree_edges : list
        List of (child, parent) edges that constitute a spanning tree
        of the ontology. If specified, then for each term T only the
        genes connected to T in the spanning tree are re-routed to the
        dummy node. Default: None (no restriction).

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    new_mapping = []
    new_hierarchy = []
    for term in self.terms:
        # Only terms that have children get a dummy intermediary;
        # leaf terms keep their genes attached directly.
        if len(self.parent_2_child[term]) > 0:
            target = 'dummy2_%s' % term
        else:
            target = term

        routed = False
        for gene in (self.genes[i] for i in self.term_2_gene[term]):
            if (tree_edges is None) or (gene, term) in tree_edges:
                new_mapping.append((gene, target))
                routed = True
        # Connect the dummy to its term only if some gene was routed
        # through it.
        if routed and target != term:
            new_hierarchy.append([target, term])

        # Keep the term's own parent connections (tree-restricted if
        # tree_edges was given).
        for parent in self.child_2_parent[term]:
            if (tree_edges is None) or (term, parent) in tree_edges:
                new_hierarchy.append((term, parent))

    return Ontology(new_hierarchy, new_mapping)
def _collect_transform(self,
                       tree_edges=None,
                       hidden_gene=True,
                       hidden_parent=True,
                       hidden_child=True):
    """Create intermediate "collector" nodes and duplicate nodes to
    represent non-tree ("hidden") connections.

    For each term, genes are grouped under a 'collect_hidden_gene_*'
    node; non-tree children and parents are duplicated and grouped
    under 'collect_hidden_child_*' / 'collect_hidden_parent_*' nodes.

    Parameters
    ----------
    tree_edges : container, optional
        (child, parent) edges of a spanning tree. Default: self.get_tree().
    hidden_gene, hidden_parent, hidden_child : bool
        Whether to duplicate non-tree gene/parent/child connections.

    Returns
    -------
    : ddot.Ontology.Ontology
        New ontology with collector and duplicate nodes; duplicate
        nodes carry 'Original_Name', 'Hidden', and 'Size' attributes.
    """
    ont = self
    if tree_edges is None:
        tree_edges = self.get_tree()

    # Counter of how many copies of each node have been made; used to
    # generate unique duplicate names "<node>.<copy_number>".
    nodes_copy = {v : 1 for v in ont.genes + ont.terms}
    def get_copy(u):
        u_name = '%s.%s' % (u, nodes_copy[u])
        nodes_copy[u] += 1
        return u_name

    collect_nodes = []
    new_gene_2_term = []
    new_child_2_parent = []
    for t in ont.terms:
        ## Gene-term connections: route all genes through a collector node
        collect_hidden_gene = 'collect_hidden_gene_%s' % t
        used_hidden_gene = False
        for g in [ont.genes[g] for g in ont.term_2_gene[t]]:
            if (not hidden_gene) or ((g, t) in tree_edges):
                new_gene_2_term.append((g, collect_hidden_gene))
                used_hidden_gene = True
            else:
                # Non-tree gene connection: use a duplicate of the gene
                new_gene_2_term.append((get_copy(g), collect_hidden_gene))
                used_hidden_gene = True
        if used_hidden_gene:
            collect_nodes.append(collect_hidden_gene)
            new_child_2_parent.append((collect_hidden_gene, t))

        ## Parent-child term connections
        collect_hidden_child = 'collect_hidden_child_%s' % t
        collect_hidden_parent = 'collect_hidden_parent_%s' % t
        used_hidden_child, used_hidden_parent = False, False
        for c in ont.parent_2_child[t]:
            if (not hidden_child) or ((c,t) in tree_edges):
                new_child_2_parent.append((c,t))
            else:
                # Non-tree child: duplicate and route through collector
                new_child_2_parent.append((get_copy(c), collect_hidden_child))
                used_hidden_child = True
        for p in ont.child_2_parent[t]:
            if hidden_parent and ((t,p) not in tree_edges):
                # Non-tree parent: duplicate and route through collector
                new_child_2_parent.append((get_copy(p), collect_hidden_parent))
                used_hidden_parent = True
        if used_hidden_child:
            collect_nodes.append(collect_hidden_child)
            new_child_2_parent.append((collect_hidden_child, t))
        if used_hidden_parent:
            collect_nodes.append(collect_hidden_parent)
            new_child_2_parent.append((collect_hidden_parent, t))

    ont_collect = Ontology(new_child_2_parent,
                           new_gene_2_term,
                           node_attr=ont.node_attr.copy(),
                           edge_attr=ont.edge_attr.copy(),
                           verbose=False)

    ##################################################
    # Set Original_Name and Size for Duplicate Nodes #

    new_and_orig = [('%s.%s' %(v,i), v) for v, copy_num in nodes_copy.items()
                    for i in (range(1, copy_num) if copy_num>1 else [])]
    new_2_orig = dict(new_and_orig)
    df = pd.DataFrame({'orig_tmp' : [x[1] for x in new_and_orig],
                       'Hidden' : True},
                      index=[x[0] for x in new_and_orig])
    # Fix: np.str / np.bool were removed in NumPy >= 1.24; the builtin
    # str / bool produce the same pandas dtypes.
    df = df.astype({'orig_tmp' : str, 'Hidden' : bool})

    # For duplicate nodes, set the Original_Name attribute to the name of the original node
    merge = pd.merge(df, ont.node_attr, how='left', left_on=['orig_tmp'], right_index=True)
    if 'Original_Name' in merge:
        unset = pd.isnull(merge['Original_Name']).values
        merge.loc[unset, 'Original_Name'] = df.loc[unset, 'orig_tmp'].values
    else:
        merge['Original_Name'] = df['orig_tmp'].values
    del merge['orig_tmp']

    # Set the 'Size' attribute of duplicate nodes to be the 'Size'
    # of the original node. If the original node is a term with no
    # 'Size' attribute, then set 'Size' to be the number of genes
    # in the term
    in_merge = set(merge.index)
    for node in merge.index:
        if node in new_2_orig:
            orig = new_2_orig[node]
            if orig in in_merge and not pd.isnull(merge.loc[orig, 'Size']):
                merge.loc[node, 'Size'] = merge.loc[new_2_orig[node], 'Size']
            elif orig in ont.terms_index:
                merge.loc[node, 'Size'] = ont.term_sizes[ont.terms_index[orig]]

    # Append attributes for the new nodes
    try:
        # Used for pandas version >= 0.23
        ont_collect.node_attr = pd.concat([ont.node_attr, merge], axis=0, sort=True)
    except:
        ont_collect.node_attr = pd.concat([ont.node_attr, merge], axis=0)

    ########################################
    # Set Label and Size for collect nodes #
    ########################################
    def get_label(x):
        if 'hidden_child' in x:
            return 'Linked Children'
        elif 'hidden_parent' in x:
            return 'Linked Parents'
        elif 'hidden_gene' in x:
            return 'Linked Genes'
        elif 'tree_gene' in x:
            return 'Genes'
    collect_attr = pd.DataFrame(
        {'Size' : 1,
         'Label' : [get_label(x) for x in collect_nodes],
         'is_collect_node' : True},
        index=collect_nodes)
    ont_collect.update_node_attr(collect_attr)

    return ont_collect
def unfold(self,
           duplicate=None,
           genes_only=False,
           levels=None,
           tree_edges=None):
    """Traverses the ontology from the root to the leaves while
    duplicating nodes during the traversal to create a tree representation.

    Traverse the ontology from the root nodes to the leaves in a
    breadth-first manner. Each time a node in `duplicate` is reached,
    a fresh copy of it (named "<node>.<copy_number>") is created, so
    the result is a tree rather than a DAG.

    Parameters
    ----------
    duplicate : list
        Nodes to duplicate for unfolding. Default: all genes and terms
    genes_only : bool
        If True, then duplicate all of the genes and none of the terms. Default: False
    levels : int, optional
        If given, traversal past non-tree ("hidden") edges is limited
        to this depth; each node gets a 'Level' attribute recording its
        hidden depth (0 = reached via tree edges only).
    tree_edges : container, optional
        (child, parent) spanning-tree edges; only used when `levels` is
        given. Default: self.get_tree().

    Returns
    -------
    : ddot.Ontology.Ontology
        Unfolded ontology. Duplicated nodes carry an 'Original_Name'
        node attribute; edge attributes are carried over from the
        original edges where present.
    """
    # Work on a copy with annotations propagated in 'reverse'.
    ont = self.propagate(direction='reverse', inplace=False)

    # "Hidden mode": limit traversal depth past non-tree edges.
    hidden_mode = levels is not None
    if hidden_mode:
        if tree_edges is None:
            tree_edges = self.get_tree()
        # hidden_depth[name] = how many non-tree hops from the root.
        hidden_depth = {}

    if genes_only:
        duplicate = ont.genes
    elif duplicate is None:
        duplicate = ont.genes + ont.terms

    # Per-node copy counter; nodes not in `duplicate` keep their name.
    nodes_copy = {x : 0 for x in duplicate}
    def get_name(u):
        # Return a fresh unique name for a duplicated node, or the
        # original name if the node is not being duplicated.
        if u in nodes_copy:
            u_name = '%s.%s' % (u, nodes_copy[u])
            nodes_copy[u] += 1
        else:
            u_name = u
        return u_name

    # Seed the traversal with (renamed) roots.
    to_expand = []
    new_2_orig = {}
    for u in ont.get_roots():
        u_name = get_name(u)
        new_2_orig[u_name] = u
        to_expand.append(u_name)
        if hidden_mode:
            hidden_depth[u_name] = 0
    expanded = set(to_expand)

    hierarchy, mapping = [], []

    # Manual bfs: `to_expand` doubles as the BFS queue; `curr` is the
    # read cursor.
    curr = 0
    while curr < len(to_expand):
        v_name = to_expand[curr]
        v = new_2_orig[v_name]

        # Visit the genes annotated to term v.
        for u in [ont.genes[u] for u in ont.term_2_gene[v]]:
            u_name = get_name(u)
            new_2_orig[u_name] = u
            mapping.append((u_name, v_name))
            if hidden_mode:
                # Track how deep past non-tree edges this gene lies.
                v_depth = hidden_depth[v_name]
                if v_depth==0:
                    if (u,v) in tree_edges:
                        hidden_depth[u_name] = 0
                    else:
                        hidden_depth[u_name] = 1
                elif v_depth < levels:
                    hidden_depth[u_name] = v_depth + 1

        # Visit the child terms of v.
        for u in ont.parent_2_child[v]:
            u_name = get_name(u)
            new_2_orig[u_name] = u
            hierarchy.append((u_name, v_name))
            if hidden_mode:
                v_depth = hidden_depth[v_name]
                insert = u_name not in expanded
                if v_depth==0 and ((u,v) in tree_edges):
                    hidden_depth[u_name] = 0
                elif v_depth < levels:
                    hidden_depth[u_name] = v_depth + 1
                else:
                    # Past the depth limit: record the edge but do not
                    # continue traversal from this copy.
                    insert = False
            else:
                insert = u_name not in expanded
            if insert:
                to_expand.append(u_name)
                expanded.add(u_name)

        curr += 1

    # Build node attributes for the unfolded nodes, carrying over the
    # original node's attributes and recording its original name.
    new_nodes, orig_nodes = zip(*new_2_orig.items())
    new_nodes, orig_nodes = list(new_nodes), list(orig_nodes)
    ont.node_attr = ont.node_attr.reindex(list(set(orig_nodes)))
    node_attr = ont.node_attr.loc[orig_nodes, :].copy()
    if 'Original_Name' in node_attr:
        unset = pd.isnull(node_attr['Original_Name']).values
        node_attr.loc[unset, 'Original_Name'] = np.array(orig_nodes)[unset]
    else:
        node_attr['Original_Name'] = orig_nodes
    if hidden_mode:
        node_attr['Level'] = [hidden_depth[v] for v in new_nodes]
    node_attr.index = new_nodes
    node_attr.dropna(axis=0, how='all', inplace=True)

    # Carry over edge attributes from the original edges, re-keyed to
    # the new (possibly duplicated) node names.
    new_edges = hierarchy + mapping
    old_edges = [(new_2_orig[u], new_2_orig[v]) for u, v in new_edges]
    in_index = [x in ont.edge_attr.index for x in old_edges]
    if sum(in_index) > 0:
        edge_attr = ont.edge_attr.loc[[x for x, y in zip(old_edges, in_index) if y], :].copy()
        edge_attr.index = pd.MultiIndex.from_tuples([x for x, y in zip(new_edges, in_index) if y])
        edge_attr.dropna(axis=0, how='all', inplace=True)
    else:
        edge_attr = None

    ont = Ontology(hierarchy,
                   mapping,
                   edge_attr=edge_attr,
                   node_attr=node_attr,
                   parent_child=False,
                   verbose=False)
    return ont
def _to_networkx_no_layout(self):
    """Convert this Ontology into a networkx.DiGraph without computing
    any node layout.

    Nodes are genes and terms with 'NodeType', 'Size', 'isRoot', and
    'Label' attributes; edges carry the gene-term / child-parent
    'EdgeType' attribute. Existing node_attr/edge_attr values are
    copied onto the graph.

    NOTE(review): uses the legacy ``G.node[...]`` accessor, which was
    removed in newer networkx releases (use ``G.nodes[...]`` there) --
    this module appears pinned to an older networkx; confirm before
    upgrading.

    Returns
    -------
    : nx.DiGraph
    """
    G = nx.DiGraph()

    #################################
    ### Add nodes and node attributes

    G.add_nodes_from(self.genes + self.terms)
    set_node_attributes_from_pandas(G, self.node_attr)

    # Ensure that all 'Size' values are the same numeric type
    if 'Size' in self.node_attr.columns:
        dtype = self.node_attr['Size'].dtype
        if dtype in [np.dtype('float16'), np.dtype('float32'), np.dtype('float64')]:
            dtype = float
        else:
            dtype = int
    else:
        dtype = int

    # Terms: default 'Size' is the number of genes in the term.
    for t in self.terms:
        G.node[t][self.NODETYPE_ATTR] = self.TERM_NODETYPE
        if ('Size' not in G.node[t]) or pd.isnull(G.node[t]['Size']):
            G.node[t]['Size'] = dtype(self.term_sizes[self.terms_index[t]])
        G.node[t]['isRoot'] = False
    # Genes: default 'Size' is 1.
    for g in self.genes:
        G.node[g][self.NODETYPE_ATTR] = self.GENE_NODETYPE
        if ('Size' not in G.node[g]) or pd.isnull(G.node[g]['Size']):
            G.node[g]['Size'] = dtype(1)
        G.node[g]['isRoot'] = False

    # Identify the root
    # NOTE(review): only the first root is marked; assumes a single
    # root -- confirm for multi-rooted ontologies.
    root = self.get_roots()[0]
    G.node[root]['isRoot'] = True

    # Set the node attribute 'Label'. If the node has a "Original
    # Name" attribute, indicating that it is a duplicate, then use
    # that. Otherwise, use the node's name.
    for x in self.genes + self.terms:
        data = G.node[x]
        if ('Label' not in data) or pd.isnull(data['Label']):
            if ('Original_Name' in data) and (not pd.isnull(data['Original_Name'])):
                data['Label'] = data['Original_Name']
            else:
                data['Label'] = x

    #################################
    ### Add edges and edge attributes

    G.add_edges_from([(g, self.terms[t],
                       {self.EDGETYPE_ATTR : self.GENE_TERM_EDGETYPE}) \
                      for g in self.genes for t in self.gene_2_term[g]])
    G.add_edges_from([(c, p,
                       {self.EDGETYPE_ATTR : self.CHILD_PARENT_EDGETYPE}) \
                      for p in self.terms for c in self.parent_2_child.get(p, [])])
    set_edge_attributes_from_pandas(G, self.edge_attr)

    return G
def expand(self, spanning_tree=True, layout_params=None, scale=1):
    """Apply the "bubble-collect" transform and layout to this Ontology
    and return the resulting graph.

    Mirrors the 'bubble-collect' branch of :meth:`to_networkx`:
    collector/duplicate nodes are created, positions are computed from
    the spanning tree, and hidden collector nodes are optionally
    removed according to ``layout_params``.

    Parameters
    ----------
    spanning_tree : bool or list
        If True, use self.get_tree(); otherwise a list of
        (child, parent) tree edges.
    layout_params : dict, optional
        Overrides for {'hidden_parent', 'hidden_child', 'hidden_gene'}
        (defaults True/False/False, as in to_networkx).
    scale : float
        Multiplier applied to node positions.

    Returns
    -------
    : nx.DiGraph

    Fix: this method previously referenced the undefined names
    ``layout_params`` and ``scale`` (raising NameError) and never
    returned the constructed graph.
    """
    default_layout_params = {'hidden_parent' : True,
                             'hidden_child' : False,
                             'hidden_gene' : False}
    if layout_params is not None:
        default_layout_params.update(layout_params)
    layout_params = default_layout_params

    if spanning_tree is True:
        ont = self._collect_transform()
    else:
        # Assume a list of tree edges are supplied
        ont = self._collect_transform(spanning_tree)

    # Lay out the spanning tree and snap collector nodes to a grid.
    G_tree = ont.get_tree(ret='ontology')._to_networkx_no_layout()
    pos = bubble_layout_nx(G_tree)
    tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
    collect_nodes = tmp[tmp['is_collect_node']].index
    gridify(collect_nodes, pos, G_tree)

    ## Remove collector nodes that layout_params says to hide
    def decide_delete(v):
        return ((not layout_params['hidden_parent'] and v=='Linked Parents') or
                (not layout_params['hidden_child'] and v=='Linked Children') or
                (not layout_params['hidden_gene'] and v=='Linked Genes'))
    tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
    tmp = tmp[tmp['is_collect_node']]
    tmp = tmp[tmp['Label'].apply(decide_delete)]
    to_delete = tmp.index.tolist()

    ont_red = ont
    if len(to_delete) > 0:
        # Need fast special delete
        ont_red = ont_red.delete(to_delete=to_delete, preserve_transitivity=True)

    # Set the original term sizes for the original copy of
    # each term (not the duplicates)
    ont_red.update_node_attr(pd.DataFrame({'Size' : self.term_sizes}, index=self.terms))

    G = ont_red._to_networkx_no_layout()
    nodes_set = set(G.nodes())
    G.pos = {n : (float(scale*p[0]), float(scale*p[1])) for n, p in pos.items() if n in nodes_set}
    nx_set_tree_edges(G, ont_red.get_tree())

    ######################################################
    # TODO: move this visual styling outside of the layout
    # functionality
    nx.set_edge_attributes(G, values='ARROW', name='Vis:EDGE_SOURCE_ARROW_SHAPE')
    nx.set_edge_attributes(G, values='NONE', name='Vis:EDGE_TARGET_ARROW_SHAPE')
    for v, data in G.nodes(data=True):
        # Edges into "Linked Parents" collectors point the other way
        # visually; flip their arrowheads.
        if 'collect_hidden_parent' in v and data.get('is_collect_node'):
            for _, _, edge_data in G.in_edges(v, data=True):
                edge_data["Vis:EDGE_TARGET_ARROW_SHAPE"] = 'ARROW'
                edge_data["Vis:EDGE_SOURCE_ARROW_SHAPE"] = 'NONE'

    return G
def to_networkx(self,
                layout='bubble',
                spanning_tree=True,
                layout_params=None,
                verbose=False):
    """Converts Ontology into a NetworkX object.

    Parameters
    ----------
    layout : str
        The name of the layout algorithm ('bubble' or
        'bubble-collect') for laying out the Ontology as a graph.
        Node positions are stored in the node attributes 'x_pos' and
        'y_pos'. If None, then do not perform a layout.
    spanning_tree : bool or list
        If True, then identify a spanning tree of the DAG and include
        an edge attribute "Is_Tree_Edge" that indicates tree
        membership. A list of (child, parent) tree edges may also be
        supplied directly.
    layout_params : dict, optional
        Overrides for {'hidden_parent', 'hidden_child', 'hidden_gene'}
        used by the 'bubble-collect' layout (defaults
        True/False/False).
    verbose : bool
        Passed to the bubble layout routine.

    Returns
    -------
    : nx.DiGraph
    """
    # Merge caller overrides into the default layout parameters.
    default_layout_params = {'hidden_parent' : True,
                             'hidden_child' : False,
                             'hidden_gene' : False}
    if layout_params is not None:
        default_layout_params.update(layout_params)
    layout_params = default_layout_params

    if spanning_tree:
        scale = 1

        if layout is None or layout=='bubble':
            G = self._to_networkx_no_layout()
            if spanning_tree is True:
                tree_edges = self.get_tree()
            else:
                # Assume a list of tree edges was supplied.
                tree_edges = spanning_tree
            nx_set_tree_edges(G, tree_edges)
            if layout=='bubble':
                # Lay out a dummy-augmented tree, snap dummies to a
                # grid, then drop the dummies from the positions.
                G_tree = self.propagate('reverse')._make_dummy(tree_edges)._to_networkx_no_layout()
                pos = bubble_layout_nx(G_tree, verbose=verbose)
                gridify([v for v in G_tree.nodes() if 'dummy2' in v], pos, G_tree)
                G.pos = {n : (float(scale*p[0]), float(scale*p[1])) for n, p in pos.items() if 'dummy2' not in n}

        elif layout=='bubble-collect':
            # NOTE(review): this branch duplicates Ontology.expand();
            # keep the two in sync.
            if spanning_tree is True:
                ont = self._collect_transform()
            else:
                # Assume a list of tree edges are supplied
                ont = self._collect_transform(spanning_tree)

            G_tree = ont.get_tree(ret='ontology')._to_networkx_no_layout()
            pos = bubble_layout_nx(G_tree)
            tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
            collect_nodes = tmp[tmp['is_collect_node']].index
            gridify(collect_nodes, pos, G_tree)

            ## Remove collector nodes whose category layout_params hides
            def decide_delete(v):
                return ((not layout_params['hidden_parent'] and v=='Linked Parents') or
                        (not layout_params['hidden_child'] and v=='Linked Children') or
                        (not layout_params['hidden_gene'] and v=='Linked Genes'))
            tmp = ont.node_attr[['Label', 'is_collect_node']].dropna()
            tmp = tmp[tmp['is_collect_node']]
            tmp = tmp[tmp['Label'].apply(decide_delete)]
            to_delete = tmp.index.tolist()

            ont_red = ont
            if len(to_delete) > 0:
                # Need fast special delete
                ont_red = ont_red.delete(to_delete=to_delete, preserve_transitivity=True)

            # Set the original term sizes for the original copy of
            # each term (not the duplicates)
            ont_red.update_node_attr(pd.DataFrame({'Size' : self.term_sizes}, index=self.terms))

            G = ont_red._to_networkx_no_layout()
            nodes_set = set(G.nodes())
            G.pos = {n : (float(scale*p[0]), float(scale*p[1])) for n, p in pos.items() if n in nodes_set}
            nx_set_tree_edges(G, ont_red.get_tree())

            ######################################################
            # TODO: move this visual styling outside of the layout
            # functionality
            nx.set_edge_attributes(G, values='ARROW', name='Vis:EDGE_SOURCE_ARROW_SHAPE')
            nx.set_edge_attributes(G, values='NONE', name='Vis:EDGE_TARGET_ARROW_SHAPE')
            for v, data in G.nodes(data=True):
                # if 'collect_hidden' in v and 'is_collect_node' in data and data['is_collect_node']:
                #     for u in G.predecessors(v):
                #         G.node[u]['Vis:Fill Color'] = '#3182BD'
                try:
                    # Flip arrowheads on edges into "Linked Parents"
                    # collectors so they read as pointing upward.
                    if 'collect_hidden_parent' in v and 'is_collect_node' in data and data['is_collect_node']:
                        for _, _, data in G.in_edges(v, data=True):
                            data["Vis:EDGE_TARGET_ARROW_SHAPE"] = 'ARROW'
                            data["Vis:EDGE_SOURCE_ARROW_SHAPE"] = 'NONE'
                except:
                    print(data)
                    print('v', v)
                    print('collect_hidden_parent' in v)
                    print('is_collect_node' in data)
                    print(data['is_collect_node'])
                    raise
        else:
            raise Exception('Unsupported layout: %s', layout)

        if layout is not None:
            # Persist positions as plain node attributes.
            nx.set_node_attributes(G, values={n : x for n, (x,y) in G.pos.items()}, name='x_pos')
            nx.set_node_attributes(G, values={n : y for n, (x,y) in G.pos.items()}, name='y_pos')
    else:
        G = self._to_networkx_no_layout()

    return G
@classmethod
def from_table(cls,
               table,
               parent=0,
               child=1,
               is_mapping=None,
               mapping=None,
               mapping_parent=0,
               mapping_child=1,
               header=0,
               propagate=False,
               verbose=False,
               clixo_format=False,
               clear_default_attr=True,
               **kwargs):
    # NOTE(review): because .format() is called on it, the string below
    # is an ordinary expression evaluated at call time, not the
    # function's __doc__.
    """Create Ontology from a tab-delimited table or pandas DataFrame.

    Duplicate gene-term or term-term connections in the table are removed.

    Parameters
    ----------
    table : pandas.DataFrame, file-like object, or filename
        A table that lists (child term, parent term) pairs. If
        mapping==None, then this table should also include (gene,
        term) pairs.
    parent : int or str
        Column for parent terms in table (index or name of column)
    child : int or str
        Column for child terms and genes in table (index or name of column)
    is_mapping : function
        A function that is applied on each row and returns True if
        the row represents a (gene, term) pair and False
        otherwise. This function is only applied when a separate
        table of (gene, term) pairs is not specified,
        i.e. mapping==None.

        The default function is `lambda row: row[2]=={0}`
        which tests if the third column equals the string "{0}".
    mapping : pandas.DataFrame, file-like object, or filename (optional)
        A separate table listing only (gene, term) pairs
    mapping_parent : int or str
        Column for terms in mapping table (index or name of column)
    mappping_child : int or str
        Column for genes in mapping table (index or name of column)
    header : int or None
        Row number to use as the column names, which are then
        stored in the resulting Ontology object's `edge_attr`
        field. For example if `header=0` (default), then the first
        row is assumed to be column names. If `header=None`, then
        no column names are assumed.
    propagate : None or str
        The direction ('forward' or 'reverse') for propagating
        gene-term annotations up the hierarchy with
        Ontology.propagate(). If None, then don't
        propagate annotations.
    clixo_format : bool
        If True, The table is assumed to be in the same format
        produced by the CLIXO C++ implementation. In particular,
        table has three columns:

        Column 1) Parent Term
        Column 2) Child Term or Gene
        Column 3) The string "gene" if the row is a
        gene-term mapping, otherwise the string "default".

        The table is also assumed to have no column headers (i.e. header=False)
    clear_default_attr: bool
        If True (default), then remove the edge attribute
        'EdgeType' created using Ontology.to_table(). This
        attribute was created to make the table be an equivalent
        representation of an Ontology object; however, it is no
        longer necessary after reconstructing the Ontology object.

    Returns
    -------
    : ddot.Ontology.Ontology
    """.format(cls.GENE_TERM_EDGETYPE)

    if clixo_format:
        # CLIXO tables: 3 headerless columns, third column marks
        # gene-term rows with the literal string 'gene'.
        ont = cls.from_table(
            table,
            parent=0,
            child=1,
            is_mapping=lambda x: x[2]=='gene',
            header=None,
            clixo_format=False,
            verbose=verbose)
        # Drop the third (marker) column from the edge attributes.
        # NOTE(review): assumes the stringified column name is '2' --
        # confirm against the pandas version in use.
        ont.edge_attr.columns = map(str, ont.edge_attr.columns)
        del ont.edge_attr['2']
        return ont

    if is_mapping is None:
        if mapping is None:
            # print('WARNING: no gene-term connections '
            #       'were specified by the is_mapping '
            #       'function or separate table. '
            #       'Default: assume a gene-term connection when the 3rd column equals %s' % cls.GENE_TERM_EDGETYPE)
            is_mapping = lambda x: x.iloc[2]==cls.GENE_TERM_EDGETYPE

    # Read table
    try:
        table = pd.read_table(table, comment='#', header=header)
    except:
        # Already a DataFrame (or unreadable, in which case assert fires).
        assert isinstance(table, pd.DataFrame)

    # Resolve positional column specifiers into column names.
    if child not in table.columns:
        child = table.columns[child]
    if parent not in table.columns:
        parent = table.columns[parent]

    # Node names are always handled as strings.
    for col in [child, parent]:
        table.loc[:,col] = table.loc[:,col].astype(str)

    # Remaining columns become edge attributes keyed by (Child, Parent).
    edge_attr = table.set_index([child, parent])
    edge_attr.index.rename(['Child', 'Parent'], inplace=True)

    if mapping is None:
        # Extract gene-term connections from table
        mask = table.apply(is_mapping, axis=1)
        mapping = table.loc[mask, :].loc[:,[child, parent]]
        hierarchy = table.loc[~mask, :].loc[:,[child, parent]]
        mapping_child, mapping_parent = child, parent
    else:
        # Read separate table of gene-term connections
        try:
            mapping = pd.read_table(mapping, comment='#', header=header)
        except:
            assert isinstance(mapping, pd.DataFrame)

        if mapping_child not in mapping.columns:
            mapping_child = mapping.columns[mapping_child]
        if mapping_parent not in mapping.columns:
            mapping_parent = mapping.columns[mapping_parent]

        for col in [mapping_child, mapping_parent]:
            mapping.loc[:,col] = mapping.loc[:,col].astype(str)

        # Merge the mapping table's extra columns into edge_attr.
        mapping_attr = mapping.set_index([mapping_child, mapping_parent])
        mapping_attr.index.rename(['Child', 'Parent'], inplace=True)
        try:
            # Used for pandas version >= 0.23
            edge_attr = pd.concat([edge_attr, mapping_attr], sort=True)
        except:
            edge_attr = pd.concat([edge_attr, mapping_attr])

        mapping = mapping.loc[:,[mapping_child, mapping_parent]]
        hierarchy = table.loc[:,[child, parent]]

    # Drop duplicate gene-term and term-term connections (warn first).
    dups = mapping.duplicated([mapping_child, mapping_parent]).sum()
    if dups > 0:
        print('WARNING: Dropping %s duplicate gene-term connections' % dups)
        mapping.drop_duplicates([mapping_child, mapping_parent], inplace=True)

    dups = hierarchy.duplicated([child, parent]).sum()
    if dups > 0:
        print('WARNING: Dropping %s duplicate term-term connections' % dups)
        hierarchy.drop_duplicates([child, parent], inplace=True)

    edge_attr = edge_attr.loc[~ edge_attr.index.duplicated(), :]
    edge_attr.index.names = ['Child', 'Parent']

    if clear_default_attr:
        # Remove the round-trip marker column written by to_table().
        if cls.EDGETYPE_ATTR in edge_attr:
            del edge_attr[cls.EDGETYPE_ATTR]

    mapping, hierarchy = mapping.values.tolist(), hierarchy.values.tolist()

    return cls(hierarchy,
               mapping,
               parent_child=False,
               edge_attr=edge_attr,
               propagate=propagate,
               verbose=verbose,
               **kwargs)
@classmethod
def from_scipy_linkage(cls, Z):
    """Create an Ontology from a linkage matrix produced by scipy's
    hierarchical/agglomerative clustering. Note that this form of
    clustering produces a binary tree.

    Parameters
    ----------
    Z : ndarray
        Linkage matrix, as returned by scipy.cluster.hierarchy.linkage.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    import scipy.cluster.hierarchy

    # Cluster ids of the original observations (the tree's leaves).
    _, all_nodes = scipy.cluster.hierarchy.to_tree(Z, rd=True)
    leaf_ids = set(scipy.cluster.hierarchy.leaves_list(Z))

    hierarchy, mapping = [], []
    for node in all_nodes:
        parent_id = node.get_id()
        # Each internal node has up to two children: left then right.
        for child_node in (node.get_left(), node.get_right()):
            if not child_node:
                continue
            child_id = child_node.get_id()
            # Leaves become genes; internal nodes become terms.
            if child_id in leaf_ids:
                mapping.append((parent_id, child_id))
            else:
                hierarchy.append((parent_id, child_id))

    return cls(hierarchy, mapping, parent_child=True)
@classmethod
def from_ndex(cls,
              ndex_uuid,
              ndex_user=None,
              ndex_pass=None,
              ndex_server=None,
              edgetype_attr=None,
              edgetype_value=None):
    """Reads an Ontology stored on NDEx. Gene and terms are distinguished
    according by an edge attribute.

    Parameters
    ----------
    ndex_uuid : str
        NDEx UUID of ontology, or a URL containing the UUID.
    ndex_user, ndex_pass, ndex_server : str, optional
        NDEx credentials and server; default to the values in
        ddot.config.
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Fall back to package-wide NDEx configuration.
    if ndex_server is None:
        ndex_server = ddot.config.ndex_server
    if ndex_user is None:
        ndex_user = ddot.config.ndex_user
    if ndex_pass is None:
        ndex_pass = ddot.config.ndex_pass

    # A full URL was given: split it into server and UUID parts.
    if '/' in ndex_uuid:
        ndex_server = parse_ndex_server(ndex_uuid)
        ndex_uuid = parse_ndex_uuid(ndex_uuid)

    # Fix: the password argument previously referenced an undefined
    # name (scrubbed credential placeholder); pass the resolved
    # ndex_pass credential.
    G = NdexGraph(
        server=ndex_server,
        username=ndex_user,
        password=ndex_pass,
        uuid=ndex_uuid)
    return cls.from_NdexGraph(
        G,
        edgetype_attr=edgetype_attr,
        edgetype_value=edgetype_value)
@classmethod
def from_NdexGraph(cls,
                   G,
                   edgetype_attr=None,
                   edgetype_value=None):
    """Converts a NdexGraph object to an Ontology object. Gene and terms
    are distinguished by an edge attribute.

    Parameters
    ----------
    G : NdexGraph
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Translate to a networkx graph, then delegate.
    nx_graph = NdexGraph_to_nx(G)
    return cls.from_networkx(nx_graph,
                             edgetype_attr=edgetype_attr,
                             edgetype_value=edgetype_value)
@classmethod
def from_networkx(cls,
                  G,
                  edgetype_attr=None,
                  edgetype_value=None,
                  clear_default_attr=True):
    """Converts a NetworkX object to an Ontology object. Gene and terms
    are distinguished by an edge attribute.

    Parameters
    ----------
    G : nx.DiGraph
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs
    clear_default_attr : bool
        If True (default), then remove the node and edge attributes
        ('Label', 'Size', 'NodeType', 'EdgeType', ...) that
        Ontology.to_networkx() / Ontology.to_ndex() created; they are
        redundant once the Ontology object is reconstructed.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if edgetype_attr is None:
        edgetype_attr = cls.EDGETYPE_ATTR
    if edgetype_value is None:
        edgetype_value = cls.GENE_TERM_EDGETYPE

    # Split edges into gene-term mappings and term-term hierarchy
    # according to the edge-type attribute.
    hierarchy, mapping = [], []
    for src, dst, attrs in G.edges(data=True):
        if attrs[edgetype_attr] == edgetype_value:
            mapping.append((src, dst))
        else:
            hierarchy.append((src, dst))

    ont = cls(hierarchy,
              mapping,
              node_attr=nx_nodes_to_pandas(G),
              edge_attr=nx_edges_to_pandas(G))

    if clear_default_attr:
        # Strip round-trip bookkeeping attributes.
        for col in [Ontology.NODETYPE_ATTR, 'Label', 'Size', 'isRoot', 'x_pos', 'y_pos']:
            if col in ont.node_attr.columns:
                del ont.node_attr[col]
        for col in [edgetype_attr, 'Is_Tree_Edge']:
            if col in ont.edge_attr.columns:
                del ont.edge_attr[col]

    return ont
@classmethod
def from_igraph(cls,
                G,
                edgetype_attr=None,
                edgetype_value=None,
                verbose=False):
    """Converts an igraph Graph object to an Ontology object. Gene and
    terms are distinguished by an edge attribute.

    Parameters
    ----------
    G : igraph.Graph
    edgetype_attr : str
        Name of the edge attribute that distinguishes a (gene,
        term) pair from a (child term, parent term) pair
    edgetype_value : str
        Value of the edge attribute for (gene, term) pairs
    verbose : bool
        Passed through to the Ontology constructor.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if edgetype_attr is None:
        edgetype_attr = cls.EDGETYPE_ATTR
    if edgetype_value is None:
        edgetype_value = cls.GENE_TERM_EDGETYPE

    # Split edges by the edge-type attribute, resolving endpoint names
    # through the vertex 'name' attribute.
    hierarchy, mapping = [], []
    for edge in G.es:
        src_name = G.vs[edge.source]['name']
        dst_name = G.vs[edge.target]['name']
        bucket = mapping if edge[edgetype_attr] == edgetype_value else hierarchy
        bucket.append((src_name, dst_name))

    edge_attr = ig_edges_to_pandas(G)
    node_attr = ig_nodes_to_pandas(G)
    edge_attr.index.names = ['Child', 'Parent']
    node_attr.index.name = 'Node'

    ont = cls(hierarchy,
              mapping,
              node_attr=node_attr,
              edge_attr=edge_attr,
              verbose=verbose)

    # Strip round-trip bookkeeping attributes.
    for col in [Ontology.NODETYPE_ATTR]:
        if col in ont.node_attr.columns:
            del ont.node_attr[col]
    for col in [edgetype_attr, 'Is_Tree_Edge']:
        if col in ont.edge_attr.columns:
            del ont.edge_attr[col]

    return ont
def collapse_ontology(self,
                      method='python',
                      to_keep=None,
                      min_term_size=2,
                      verbose=True):
    """Remove redundant and empty terms. When a term T is removed,
    hierarchical relations are preserved by connecting every child
    of T with every parent of T. This removal operation has the
    nice property of being commutative, i.e. the order of removal
    does not matter.

    Parameters
    -----------
    method : str
        If "mhkramer", then use the collapseRedundantNodes script
        in the alignOntology package. If "python", then use an
        internal Python script.
    to_keep : list, optional
        Terms never to collapse (only supported for method=="python").
    min_term_size : int
        Remove terms that are below this size. TODO: not yet supported
        (only applied by method=="python").
    verbose : bool
        Currently unused.

    Returns
    -------
    : ddot.ddot.Ontology
        A new Ontology object
    """
    if method=='mhkramer':
        assert to_keep is None, 'to_keep is only supported for method=="python"'

        # Propagate forward and then reverse
        ont = self.copy()
        ont = self.propagate(direction='forward', inplace=False)
        ont.propagate(direction='reverse', inplace=True)

        # Locate the bundled collapseRedundantNodes binary.
        top_level = os.path.dirname(os.path.abspath(inspect.getfile(ddot)))
        collapseRedundantNodes = os.path.join(top_level, 'alignOntology', 'collapseRedundantNodes')
        # assert os.path.isdir(ddot.config.alignOntology)
        # collapseRedundantNodes = os.path.join(ddot.config.alignOntology, 'collapseRedundantNodes')
        assert os.path.isfile(collapseRedundantNodes)

        # Write the ontology to a temp file in CLIXO format, run the
        # external tool on it, and parse its stdout back.
        with tempfile.NamedTemporaryFile('w', delete=False) as f:
            ont.to_table(f, clixo_format=True)
        try:
            cmd = '%s %s' % (collapseRedundantNodes, f.name)
            print('collapse command:', cmd)
            p = Popen(shlex.split(cmd), shell=False, stdout=PIPE, stderr=PIPE)
            collapsed, err = p.communicate()
            collapsed = collapsed.decode()
        finally:
            # Always remove the temp file, even if the subprocess fails.
            os.remove(f.name)

        ont = Ontology.from_table(
            StringIO(collapsed),
            is_mapping=lambda x: x[2]=='gene',
            clixo_format=True
        )
        # Re-attach this ontology's attributes to the collapsed result.
        ont.clear_edge_attr()
        ont.update_node_attr(self.node_attr)
        ont.update_edge_attr(self.edge_attr)
        return ont

    elif method=='python':
        ont = self.propagate('forward', inplace=False)

        # Two terms are redundant if, after forward propagation, they
        # annotate exactly the same gene set (compared via hash of the
        # gene index tuple).
        term_hash = {t : hash(tuple(g_list)) for t, g_list in ont.term_2_gene.items()}
        to_collapse = set()
        for p in ont.parent_2_child:
            for c in ont.parent_2_child[p]:
                if term_hash[p] == term_hash[c]:
                    # Parent is redundant with its child; collapse parent.
                    to_collapse.add(p)

        if min_term_size is not None:
            # Also collapse terms smaller than the size threshold.
            to_collapse = to_collapse | set([t for t, s in zip(ont.terms, ont.term_sizes) if s < min_term_size])

        if to_keep is not None:
            to_collapse = to_collapse - set(to_keep)

        # print('to_collapse:', sorted(to_collapse))

        ont.propagate('reverse', inplace=True)
        ont_red = ont.delete(to_delete=to_collapse, preserve_transitivity=True)
        return ont_red
@classmethod
def mutual_collapse(cls,
                    ont1,
                    ont2,
                    verbose=False):
    """Collapses two ontologies to the common set of genes.

    Parameters
    -----------
    ont1 : ddot.Ontology.Ontology
    ont2 : ddot.Ontology.Ontology
    verbose : bool
        If True, print progress information.

    Returns
    -------
    ont1_collapsed : ddot.Ontology.Ontology
    ont2_collapsed : ddot.Ontology.Ontology

    Raises
    ------
    Exception
        If the two ontologies share no genes.
    """
    shared_genes = set(ont1.genes) & set(ont2.genes)
    if verbose:
        print('Common genes:', len(shared_genes))

    if not shared_genes:
        raise Exception('No common genes between ontologies')

    # Restrict each ontology to the shared genes, then collapse
    # redundant/empty terms.
    pruned1 = ont1.delete(to_delete=set(ont1.genes) - shared_genes, inplace=False)
    ont1_collapsed = pruned1.collapse_ontology()
    pruned2 = ont2.delete(to_delete=set(ont2.genes) - shared_genes, inplace=False)
    ont2_collapsed = pruned2.collapse_ontology()

    if verbose:
        print('ont1_collapsed:', ont1_collapsed.summary())
        print('ont2_collapsed:', ont2_collapsed.summary())

    return ont1_collapsed, ont2_collapsed
def focus(self,
          branches=None,
          genes=None,
          collapse=False,
          root=True,
          verbose=True):
    """Restrict the ontology to the terms/genes connected to the given
    branches and/or genes, summarizing what was removed.

    Parameters
    ----------
    branches : list, optional
        Terms to focus on; keeps nodes connected to any of them.
    genes : list, optional
        Genes to focus on; keeps nodes connected to any of them.
        At least one of `branches` or `genes` must be given.
    collapse : bool
        If True, collapse redundant terms (keeping the roots) after
        restriction.
    root : bool
        If True, repeatedly add minimal common ancestors so the
        result stays rooted.
    verbose : bool
        Print progress information.

    Returns
    -------
    : ddot.Ontology.Ontology
        Restricted ontology. For each term that lost genes or child
        terms, a summary node ('<term>_<n>_other_genes' /
        '<term>_<n>_other_terms') is attached, flagged with a
        'Summary' edge attribute.
    """
    assert (branches is not None) or (genes is not None)

    to_keep = np.array(self.genes + self.terms)
    if branches is not None:
        # Keep nodes connected to at least one of the focus branches.
        to_keep = to_keep[self.connected(to_keep, branches).sum(1) > 0]
        if verbose:
            print('Genes and Terms to keep:', to_keep.size)
    if genes is not None:
        # Keep nodes connected to at least one of the focus genes.
        to_keep = to_keep[self.connected(genes, to_keep).sum(0) > 0]
        if verbose:
            print('Genes and Terms to keep:', to_keep.size)

    if root:
        # Grow to_keep with minimal common ancestors until the kept set
        # contains a common root (or no unique ancestor remains).
        while True:
            common_root = self.common_ancestors(to_keep, minimal=True)
            if common_root in to_keep or len(common_root)<=1:
                break
            else:
                print('Adding', common_root)
                to_keep = np.append(to_keep, common_root)

    ont = self.delete(to_keep=to_keep, preserve_transitivity=True)

    if collapse:
        ont = ont.collapse_ontology(method='python', to_keep=ont.get_roots())

    # Summarize connections removed by the restriction as extra nodes.
    df = ont.to_table(edge_attr=True)
    new_connections = []
    for t in ont.terms:
        removed_genes = set([self.genes[g] for g in self.term_2_gene[t]]) - set([ont.genes[g] for g in ont.term_2_gene[t]])
        removed_terms = set(self.parent_2_child[t]) - set(ont.parent_2_child[t])
        if len(removed_genes) > 0:
            new_connections.append(('%s_%s_other_genes' % (t, len(removed_genes)), t, self.GENE_TERM_EDGETYPE))
        if len(removed_terms) > 0:
            new_connections.append(('%s_%s_other_terms' % (t, len(removed_terms)), t, self.CHILD_PARENT_EDGETYPE))
    if len(new_connections) > 0:
        new_connections = pd.DataFrame(new_connections)
        new_connections.columns = ['Child', 'Parent', self.EDGETYPE_ATTR]
        new_nodes = new_connections['Child'].values.tolist()
        # Mark summary edges so they can be distinguished later.
        new_connections['Summary'] = True
        df['Summary'] = False
        try:
            # Used for pandas version >= 0.23
            tmp = pd.concat([df, new_connections], ignore_index=True, sort=True)
        except:
            tmp = pd.concat([df, new_connections], ignore_index=True)
        df = tmp[df.columns]

    # Rebuild the ontology (now including summary nodes) and carry over
    # the original node attributes.
    ont = Ontology.from_table(df)
    ont.update_node_attr(self.node_attr)

    # orig_sizes = pd.DataFrame({'Original_Size' : self.term_sizes}, index=self.terms)
    # ont.update_node_attr(orig_sizes)
    # if len(new_connections)>0:
    #     summary_sizes = pd.DataFrame({'Original_Size' : [int(x.split('_')[1]) for x in new_nodes]}, index=new_nodes)
    #     ont.update_node_attr(summary_sizes)

    if len(new_connections) > 0:
        # Label summary nodes like "3_other_genes".
        ont.update_node_attr(pd.DataFrame({'Label':['_'.join(x.split('_')[1:]) for x in new_nodes]}, index=new_nodes))

    return ont
def delete(self,
           to_delete=None,
           to_keep=None,
           preserve_transitivity=True,
           inplace=False):
    """Delete genes and/or terms from the ontology.
    Parameters
    ----------
    to_delete : array-like (optional)
        Names of genes and/or terms to delete. Either to_delete or
        to_keep must be specified. Names that are neither a known
        gene nor a known term are silently ignored.
    to_keep : array-like (optional)
        Names of genes and/or terms to keep; all other genes/terms
        are deleted. Only used if to_delete is not specified.
    preserve_transitivity : bool
        If True, then maintain transitive relations when deleting
        terms. For example, if the hierarchical structure consists
        of
        geneA --> term1
        term1 --> term2
        term2 --> term3
        term2 --> term4
        then deleting term2 will result in the structure:
        geneA --> term1
        term1 --> term3
        term1 --> term4
        If False, then deleting term2 will result in a
        disconnected structure:
        geneA --> term1
    inplace : bool
        If True, then modify the ontology. If False, then create and modify a copy.
    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if inplace:
        ont = self
    else:
        ont = self.copy()
    # Partition the requested names into terms vs. genes; unknown names drop out
    if to_delete is not None:
        terms = set([x for x in to_delete if x in ont.terms_index])
        genes = set([x for x in to_delete if x in ont.genes_index])
    elif to_keep is not None:
        terms = set(ont.terms) - set([x for x in to_keep if x in ont.terms_index])
        genes = set(ont.genes) - set([x for x in to_keep if x in ont.genes_index])
    else:
        raise Exception('Must specify nodes to delete or to keep')
    if len(genes) > 0:
        # Drop genes first and rebuild the gene-side index and mapping
        ont.genes = [g for g in ont.genes if g not in genes]
        ont.genes_index = make_index(ont.genes)
        ont.gene_2_term = {g : t for g, t in ont.gene_2_term.items()
                           if g not in genes}
        ont._update_fields()
    if len(terms) > 0:
        if preserve_transitivity:
            # Work on set-valued copies of the four mappings so that
            # membership updates during the deletion loop are cheap
            gene_2_term_set = {g : set([ont.terms[s] for s in t]) for g, t in ont.gene_2_term.items()}
            term_2_gene_set = {a : set(b) for a, b in ont.term_2_gene.items()}
            child_2_parent_set = {a : set(b) for a, b in ont.child_2_parent.items()}
            parent_2_child_set = {a : set(b) for a, b in ont.parent_2_child.items()}
            for t in terms:
                # Reconnect t's genes and child terms directly to t's
                # parents, then remove t itself from all mappings
                t_parents = child_2_parent_set[t]
                t_genes = term_2_gene_set[t]
                t_children = parent_2_child_set[t]
                for g_i in t_genes:
                    g = ont.genes[g_i]
                    gene_2_term_set[g].update(t_parents)
                    gene_2_term_set[g].remove(t)
                for p in t_parents:
                    term_2_gene_set[p].update(t_genes)
                    parent_2_child_set[p].update(t_children)
                    parent_2_child_set[p].remove(t)
                for c in t_children:
                    child_2_parent_set[c].update(t_parents)
                    child_2_parent_set[c].remove(t)
                del child_2_parent_set[t]
                del parent_2_child_set[t]
                del term_2_gene_set[t]
            # Rebuild list-valued mappings with the surviving terms re-indexed
            ont.terms = [t for t in ont.terms if t not in terms]
            terms_index = make_index(ont.terms)
            ont.terms_index = terms_index
            ont.gene_2_term = {g : sorted([terms_index[s] for s in t]) for g, t in gene_2_term_set.items()}
            ont.child_2_parent = {c : sorted(p) for c, p in child_2_parent_set.items()}
            ont.parent_2_child = invert_dict(ont.child_2_parent)
            ont._update_fields()
        else:
            # Simply drop the terms and every edge touching them
            tmp_gene_2_term = {g : [ont.terms[t] for t in t_list]
                               for g, t_list in ont.gene_2_term.items()}
            ont.terms = [t for t in ont.terms if t not in terms]
            ont.terms_index = make_index(ont.terms)
            ont.gene_2_term = {g : [ont.terms_index[t] for t in t_list if t not in terms]
                               for g, t_list in tmp_gene_2_term.items()}
            ont.parent_2_child = {p : [c for c in c_list if c not in terms]
                                  for p, c_list in ont.parent_2_child.items()
                                  if p not in terms}
            ont._update_fields()
    # Update node/edge attributes
    to_keep = (set(ont.terms) | set(ont.genes)) - genes - terms
    # NOTE(review): this keeps an edge-attribute row if EITHER endpoint
    # survives; if both endpoints must survive, '&' would be expected — confirm
    ont.edge_attr = ont.edge_attr[ont.edge_attr.index.get_level_values(0).isin(to_keep) | \
                                  ont.edge_attr.index.get_level_values(1).isin(to_keep)]
    ont.node_attr = ont.node_attr[ont.node_attr.index.isin(to_keep)]
    return ont
def rename(self,
           genes=lambda x: x,
           terms=lambda x: x,
           inplace=False):
    """Rename gene and/or term names.
    Parameters
    ----------
    genes : dict or function
        If dictionary, then it maps current gene names to new
        names. Genes not in dictionary keep their name. A value may
        also be an iterable of names (one-to-many rename).
        If function, then genes(name) returns the new name.
    terms : dict or function
        If dictionary, then it maps current term names to new
        names. Terms not in dictionary keep their name.
        If function, then terms(name) returns the new name.
    inplace : bool
        If True, then modify the ontology. If False, then create
        and modify a copy.
    Returns
    -------
    : ddot.Ontology.Ontology
    """
    # Normalize callables into explicit name->name dictionaries. If the
    # argument is already a dict, calling it raises and it is kept as-is.
    try:
        terms = {t : terms(t) for t in self.terms}
    except:
        pass
    try:
        genes = {g : genes(g) for g in self.genes}
    except:
        pass
    if inplace:
        ont = self
    else:
        ont = self.copy()
    if genes:
        new_genes = set()
        new_gene_2_term = {}
        for g in ont.genes:
            new_g = genes.get(g, g)
            # A single gene may be renamed to several new names
            if hasattr(new_g, '__iter__') and not isinstance(new_g, str):
                for new_gg in new_g:
                    new_genes.add(new_gg)
                    new_gene_2_term[new_gg] = ont.gene_2_term[g]
            else:
                new_genes.add(new_g)
                new_gene_2_term[new_g] = ont.gene_2_term[g]
        ont.genes = sorted(new_genes)
        ont.gene_2_term = new_gene_2_term
        ont.genes_index = make_index(ont.genes)
        ont._update_fields()
    if terms:
        ont.parent_2_child = {terms.get(p, p) : [terms.get(c, c) for c in c_list]
                              for p, c_list in ont.parent_2_child.items()}
        old_term_names = ont.terms
        ont.terms = [terms.get(t, t) for t in ont.terms]
        # Retain a unique set of term names
        ont.terms = sorted(set(ont.terms))
        ont.terms_index = make_index(ont.terms)
        ont.gene_2_term = {g : [ont.terms_index[terms.get(t, t)] for t in [old_term_names[t] for t in t_list]]
                           for g, t_list in ont.gene_2_term.items()}
        ont._update_fields()
    # Combined old->new mapping used to relabel attribute tables
    conversions = genes.copy()
    conversions.update(terms)
    # Remove identities
    conversions = {k : v for k, v in conversions.items() if k != v}
    f = lambda x: conversions.get(x, x)
    # Update node attributes
    index = ont.node_attr.index
    ont.node_attr.index = pd.Series(index).map(f)
    # Update edge attributes.
    # Bug fix: MultiIndex.set_levels(..., inplace=True) was deprecated in
    # pandas 1.x and removed in 2.0; reassign the rebuilt index instead.
    idx = ont.edge_attr.index
    ont.edge_attr.index = idx.set_levels([idx.levels[0].map(f),
                                          idx.levels[1].map(f)])
    ont._check_valid()
    return ont
def _check_valid(self):
    """Sanity-check internal consistency; raise on any violation."""
    if not self.is_dag():
        print('Found cycle:', nx.find_cycle(self._to_networkx_no_layout()))
        raise Exception('Not a directed acyclic graph')
    # Gene and term names must be unique
    assert len(set(self.genes)) == len(self.genes)
    assert len(set(self.terms)) == len(self.terms)
    # Every mapping must be keyed on exactly the known genes/terms
    gene_set = set(self.genes)
    term_set = set(self.terms)
    assert gene_set == set(self.gene_2_term)
    assert term_set == set(self.child_2_parent)
    assert term_set == set(self.parent_2_child)
    assert term_set == set(self.term_2_gene)
    # Attribute tables must not contain duplicate index entries
    assert not self.edge_attr.index.duplicated().any()
    assert not self.node_attr.index.duplicated().any()
def to_table(self,
             output=None,
             term_2_term=True,
             gene_2_term=True,
             edge_attr=False,
             header=True,
             parent_child=True,
             clixo_format=False):
    """Convert Ontology to a table representation. Return a
    pandas.DataFrame and, optionally, write it to a file as a
    tab-delimited file.
    Parameters
    ----------
    output : filepath or file-like
        File to write table. If None, then only return a
        pandas.DataFrame
    term_2_term : bool
        Include (child term, parent term) pairs
    gene_2_term : bool
        Include (gene, term) pairs
    edge_attr : array-like or bool
        List of extra edge attributes to include. If True, then
        include all attributes. If False, then don't include any
        attribute.
    header : bool
        If True (default), then write the column names as the
        first row of the table.
    parent_child : bool
        If True, then the first column is the parent term and the
        second column is the child term or gene. If False, then
        the columns are reversed.
    clixo_format : bool
        If True, the table is the same format used the CLIXO C++
        implementation. In particular, the table has three columns:
        Column 1) Parent Term
        Column 2) Child Term or Gene
        Column 3) The string "gene" if the row is a
        gene-term mapping, otherwise the string "default".
    Returns
    -------
    : pandas.DataFrame
        Contains at least three columns: (1) "Parent", (2)
        "Child", and (3) "EdgeType".
    """
    if clixo_format:
        # CLIXO is a headerless 3-column variant of the default table
        df = self.to_table(output=None,
                           term_2_term=True,
                           gene_2_term=True,
                           edge_attr=False,
                           header=False,
                           parent_child=True,
                           clixo_format=False)
        df.replace({self.EDGETYPE_ATTR : {self.GENE_TERM_EDGETYPE : 'gene', self.CHILD_PARENT_EDGETYPE : 'default'}}, inplace=True)
        if output is not None:
            df.to_csv(output, header=False, index=False, sep='\t')
        return df
    # Bug fix: DataFrame.append() was removed in pandas 2.0; assemble the
    # requested row groups with pd.concat instead.
    frames = []
    if term_2_term:
        frames.append(self._hierarchy_to_pandas())
    if gene_2_term:
        frames.append(self._mapping_to_pandas())
    if frames:
        df = pd.concat(frames, ignore_index=True)
    else:
        df = pd.DataFrame(columns=['Parent', 'Child', self.EDGETYPE_ATTR])
    if edge_attr and self.edge_attr.shape[1] > 0:
        # Bug fix: a list of attribute names passed as <edge_attr> was
        # previously ignored (all attributes were merged); honor the subset.
        if edge_attr is True:
            attr_df = self.edge_attr
        else:
            attr_df = self.edge_attr[list(edge_attr)]
        df = df.merge(attr_df,
                      how='left',
                      left_on=['Child', 'Parent'],
                      right_index=True)
    first_two = ['Parent', 'Child'] if parent_child else ['Child', 'Parent']
    df = df[first_two + [x for x in df.columns if x not in first_two]]
    if output is not None:
        df.to_csv(output, header=header, index=False, sep='\t')
    return df
def _hierarchy_to_pandas(self):
    """Return (Parent, Child) term-term rows tagged with the hierarchy edge type."""
    rows = [(parent, child)
            for parent, children in self.parent_2_child.items()
            for child in children]
    out = pd.DataFrame(rows, columns=['Parent', 'Child'])
    out[self.EDGETYPE_ATTR] = self.CHILD_PARENT_EDGETYPE
    return out
def _mapping_to_pandas(self):
    """Return (term, gene) annotation rows tagged with the gene-term edge type."""
    rows = []
    for gene, term_idx_list in self.gene_2_term.items():
        rows.extend((self.terms[t], gene) for t in term_idx_list)
    out = pd.DataFrame(rows, columns=['Parent', 'Child'])
    out[self.EDGETYPE_ATTR] = self.GENE_TERM_EDGETYPE
    return out
def copy(self):
    """Create a deep copy of the Ontology object"""
    dup = Ontology(None, None, **{'empty' : True})
    # Pandas attribute tables: DataFrame.copy() is sufficient
    dup.node_attr = self.node_attr.copy()
    dup.edge_attr = self.edge_attr.copy()
    # Plain lists of names: shallow slice copies
    dup.genes = self.genes[:]
    dup.terms = self.terms[:]
    # Preserve the lazily-computed cache (or its absence)
    dup._term_sizes = None if self._term_sizes is None else self._term_sizes[:]
    # Name -> index dictionaries
    dup.genes_index = self.genes_index.copy()
    dup.terms_index = self.terms_index.copy()
    # Dict-of-list mappings: copy each value list as well
    for attr in ('gene_2_term', 'term_2_gene', 'child_2_parent', 'parent_2_child'):
        setattr(dup, attr, {k : v[:] for k, v in getattr(self, attr).items()})
    return dup
def flatten(self,
            include_genes=True,
            include_terms=False,
            similarity='Resnik'):
    """Flatten the hierarchy into a node-node similarity matrix by
    calculating a similarity between pair of genes in
    `genes_subset`. Currently, only the Resnik semantic similarity
    measure is implemented.
    Parameters
    -----------
    include_genes : bool
        If True, then calculate pairwise similarities between
        genes. If `include_terms` is also True, then also
        calculate similarities between genes and terms.
    include_terms : bool
        If True, then calculate pairwise similarities between
        terms. If `include_genes` is also True, then also
        calculate similarities between genes and terms.
    similarity : str
        Type of semantic similarity. (default: "Resnik")
        The Resnik similarity s(g1,g2) is defined as
        :math:`-log_2(|T_{sca}| / |T_{root}|)` where :math:`|T|` is
        the number of genes in `genes_subset` that are under term
        T. :math:`T_{sca}` is the "smallest common ancestor", the
        common ancestral term with the smallest term
        size. :math:`T_{root}` is the root term of the ontology.
        <NAME>. (1999). Semantic similarity in a taxonomy: An
        information-based measured and its application to problems
        of ambiguity in natural
        language. <NAME>. Intell. Res. 11,95-130.
    Returns
    -------
    : (sim, nodes)
        A 2-tuple consisting of `sim`, a node-by-node NumPy array,
        and `nodes`, a NumPy array of the node names in `sim`.
    """
    assert include_genes
    assert not include_terms, 'include_terms is not yet implemented'
    if similarity == 'Resnik':
        # Smallest (best) common ancestor for every pair of nodes
        sca, nodes = self.get_best_ancestors(include_genes=include_genes)
        # Bug fix: the subset list previously aliased self.genes, so the
        # later in-place extension would have mutated the ontology itself.
        nodes_subset = list(self.genes) if include_genes else []
        nodes_subset += self.terms if include_terms else []
        nodes_idx = ddot.utils.make_index(nodes)
        idx = [nodes_idx[v] for v in nodes_subset]
        sca = sca[idx, :][:, idx]
        # Resnik: -log2(size of smallest common ancestor / total genes)
        ss = -1 * np.log2(np.array(self.term_sizes)[sca] / float(len(self.genes)))
        ss = ss.astype(np.float32)
        return ss, np.array(nodes_subset)
    else:
        raise Exception('Unsupported similarity type')
def common_ancestors(self, nodes, min_nodes='all', minimal=True):
    """Return the common ancestors of a set of genes
    Parameters
    ----------
    nodes : list
        List of nodes (genes and/or terms) to find the common ancestors
    min_nodes : str or int
        If 'all', then return only terms that contain all of the
        input genes. If an integer, then return only terms that
        contain at least <nodes> of the input genes
    minimal : bool
        If True, then do NOT return the terms that are themselves
        ancestors of the other common ancestors. This filter
        leaves only the 'minimal' set of common ancestors.
    Returns
    -------
    : list
        List of common ancestors
    """
    threshold = len(nodes) if min_nodes == 'all' else min_nodes
    # Reachability of every term from each of the input nodes
    reach = self.connected(nodes, self.terms)
    ancestors = np.array(self.terms)[reach.sum(0) >= threshold]
    if minimal:
        # Drop any common ancestor that another common ancestor reaches
        among = self.connected(ancestors, ancestors, sparse=False)
        np.fill_diagonal(among, 0)
        ancestors = ancestors[among.sum(0) == 0]
    return ancestors
def _get_term_2_gene(self, verbose=False):
    """Invert gene_2_term into a term -> [gene index] mapping."""
    if verbose:
        print('Calculating term_2_gene')
    inverted = invert_dict(
        self.gene_2_term,
        keymap=make_index(self.genes),
        valmap=dict(enumerate(self.terms)))
    # Terms with no directly annotated genes still get an (empty) entry
    for term in self.terms:
        inverted.setdefault(term, [])
    return inverted
@property
def term_sizes(self):
    # Number of genes under each term (same order as self.terms).
    # Lazily computed with forward propagation and cached on first access.
    if self._term_sizes is None:
        self._term_sizes = self._get_term_sizes(propagate=True)
    return self._term_sizes
def _get_term_sizes(self, propagate=True):
    """Returns an array of term sizes in the same order as self.terms"""
    if propagate:
        # Count genes after forward-propagating annotations up the DAG
        gene_2_term = self.propagate(inplace=False).gene_2_term
    else:
        gene_2_term = self.gene_2_term
    counts = Counter(t for t_list in gene_2_term.values() for t in t_list)
    return [counts[i] for i in range(len(self.terms))]
def get_information_gain(self):
    """Not implemented.

    The original body referenced an undefined variable (``terms``) and a
    nonexistent attribute (``parent_2_children``), so any call crashed
    with a NameError; raise NotImplementedError explicitly instead.
    """
    raise NotImplementedError(
        'Ontology.get_information_gain is not implemented')
def shuffle_genes(self, inplace=False):
    """Randomly permute the names of genes.

    Parameters
    ----------
    inplace : bool
        If True, then modify the ontology. If False, then create
        and modify a copy.

    Returns
    -------
    : ddot.Ontology.Ontology
    """
    new_order = self.genes.copy()
    random.shuffle(new_order)
    mapping = dict(zip(self.genes, new_order))
    # Bug fix: the inplace flag was previously ignored (always False)
    return self.rename(genes=mapping, inplace=inplace)
def get_tree(self, ret='edges', verbose=False):
    """Identify a spanning tree of the DAG (including genes as part of the
    DAG).
    Parameters
    ------------
    ret : str
        If 'edges', then return a list of (u, v) edges in the
        tree. If 'ontology', return an Ontology object consisting
        of only the tree edges.
    Returns
    -------
    : array-like or Ontology
    """
    tree = self.to_igraph(include_genes=True, spanning_tree=True)
    if ret == 'edges':
        # Collect the (source, target) name pairs of tree edges only
        return set((tree.vs[e.source]['name'], tree.vs[e.target]['name'])
                   for e in tree.es if e['Is_Tree_Edge'] == 'Tree')
    elif ret == 'ontology':
        # Remove every non-tree edge, then rebuild an Ontology
        non_tree = [e.index for e in tree.es if e['Is_Tree_Edge'] == 'Not_Tree']
        tree.delete_edges(non_tree)
        return Ontology.from_igraph(tree, verbose=verbose)
def is_dag(self):
    """Return True if the Ontology is a valid directed acyclic graph,
    False otherwise.
    """
    graph = self.to_igraph(include_genes=True, spanning_tree=False)
    return graph.is_dag()
def topological_sorting(self, top_down=True, include_genes=False):
    """Perform a topological sorting.
    top_down :
        If True, then ancestral nodes (e.g. the root nodes) come
        before descendants in the sorting. If False, then reverse the sorting
    """
    graph = self.to_igraph(include_genes=include_genes, spanning_tree=False)
    order = graph.topological_sorting(mode='out')
    names = list(graph.vs[order]['name'])
    return names if top_down else names[::-1]
def to_igraph(self, include_genes=True, spanning_tree=False):
    """Convert Ontology to an igraph.Graph object. Gene and term names are
    stored in the 'name' vertex attribute of the igraph object.
    Parameters
    ----------
    include_genes : bool
        Include genes as vertices in the igraph object.
    spanning_tree : bool
        If True, then identify a spanning tree of the DAG. include
        an edge attribute "Is_Tree_Edge" that indicates
    Returns
    -------
    : igraph.Graph
    """
    if include_genes:
        # Vertex ids: genes occupy [0, len(genes)), terms are offset after them
        terms_index_offset = {t : v + len(self.genes) for t, v in self.terms_index.items()}
        gene_term_edges = [(self.genes_index[g], terms_index_offset[self.terms[t]])
                           for g in self.genes
                           for t in self.gene_2_term[g]]
        child_parent_edges = [(terms_index_offset[c], terms_index_offset[p])
                              for p, children in self.parent_2_child.items()
                              for c in children]
        # Carry over node attributes (aligned to gene-then-term vertex order)
        vertex_attrs = self.node_attr.reindex(index=self.genes + self.terms).loc[self.genes + self.terms].to_dict(orient='list')
        vertex_attrs.update({
            'name':self.genes + self.terms,
            self.NODETYPE_ATTR:[self.GENE_NODETYPE for x in self.genes] + [self.TERM_NODETYPE for x in self.terms]
        })
        graph = igraph.Graph(n=len(self.genes) + len(self.terms),
                             edges=gene_term_edges + child_parent_edges,
                             directed=True,
                             vertex_attrs=vertex_attrs,
                             edge_attrs={self.EDGETYPE_ATTR : [self.GENE_TERM_EDGETYPE for x in gene_term_edges] + \
                                         [self.CHILD_PARENT_EDGETYPE for x in child_parent_edges]})
    else:
        # Terms-only graph: edges point child -> parent
        edges = [(self.terms_index[c], self.terms_index[p]) for p, children in self.parent_2_child.items() for c in children]
        graph = igraph.Graph(n=len(self.terms),
                             edges=edges,
                             directed=True,
                             vertex_attrs={'name':self.terms},
                             edge_attrs={self.EDGETYPE_ATTR : [self.CHILD_PARENT_EDGETYPE for x in edges]})
    if spanning_tree:
        # Tree-edge selection prefers the smallest parent term; genes get priority 1
        parent_priority = [self.term_sizes[self.terms_index[v['name']]] if (v['name'] in self.terms_index) else 1 for v in graph.vs]
        # Identify spanning tree
        graph = self._make_tree_igraph(
            graph,
            parent_priority=parent_priority,
            optim=min,
            edge_name='Is_Tree_Edge')
        graph.es['Is_Tree_Edge'] = ['Tree' if x else 'Not_Tree' for x in graph.es['Is_Tree_Edge']]
    return graph
def shortest_paths(self,
                   descendants=None,
                   ancestors=None,
                   sparse=False,
                   weights=None,
                   chunk_size=500):
    """Calculate the length of the shortest paths from descendant nodes to
    ancestor nodes.
    Parameters
    ----------
    sparse : bool
        If True, return a scipy.sparse matrix. If False, return a
        NumPy array
    weights : dict
        Dictionary mapping (child term, parent term) or (gene,
        term) edges to weights. Any edge with no given weight is
        assigned a weight of 0 by default.
        (default) If weights is None, then a uniform weight is
        assumed.
    chunk_size : int (optional)
        Computational optimization: shortest paths are calculated in batches.
    Returns
    -------
    d : np.ndarray or scipy.sparse.spmatrix
        d[x,y] is the length of the shortest directed path from a
        descendant node x to ancestor node y. d[x,y]==numpy.inf if
        no directed path exists. The rows are in the same order as
        <descendants>, and the columns are in the same order as
        <ancestors>.
    """
    graph = self.to_igraph(include_genes=True, spanning_tree=False)
    import numbers
    # Default: uniform edge weight of 1
    if weights is None:
        weights = 1
    # (The "is not None" test is redundant after the assignment above,
    # but harmless; a number is broadcast to all edges below.)
    if weights is not None and not isinstance(weights, numbers.Number):
        # Assume dictionary
        weights = [weights.get((graph.vs[e.source]['name'],
                                graph.vs[e.target]['name']), 0) for e in graph.es]
    graph.es['weight'] = weights
    if descendants is None:
        descendants = graph.vs
    if ancestors is None:
        ancestors = descendants
    # Compute in row chunks to bound peak memory usage
    tmp = [graph.shortest_paths(
        descendants[x[0]:x[1]],
        ancestors,
        weights='weight',
        mode='out')
        for x in split_indices_chunk(len(descendants), chunk_size)]
    if sparse:
        return scipy.sparse.vstack([scipy.sparse.csr_matrix(x) for x in tmp])
    else:
        return np.vstack(tmp)
def longest_paths(self,
                  descendants=None,
                  ancestors=None,
                  sparse=False,
                  weights=None,
                  chunk_size=500):# TODO: when ancestors are specified, the results become negative
    """Computes the lengths of the longest directed paths between all pairs
    of terms.
    Returns
    -------
    d : np.ndarray or scipy.sparse.spmatrix
        d[x,y] is the length of the longest directed path from a
        descendant term with index x to an ancestral term with
        index y, where indices are defined by
        self.terms. d[x,y]==numpy.inf if no directed path exists.
    """
    # Longest path on a DAG = shortest path with all weights negated.
    # Note: the `weights` parameter is accepted but not forwarded; a
    # uniform weight of -1 is always used — TODO confirm intended.
    d = self.shortest_paths(descendants=descendants,
                            ancestors=ancestors,
                            sparse=sparse,
                            weights=-1,
                            chunk_size=chunk_size)
    # Negate back so path lengths are positive
    if sparse:
        d.data = -1 * d.data
    else:
        d = -1 * d
    return d
def connected(self,
              descendants=None,
              ancestors=None,
              sparse=False):
    """Calculate which genes or terms are descendants of other genes or
    terms.
    Parameters
    -----------
    descendants: list
        A list of genes and/or terms. Default: A list of all genes
        followed by a list of all terms, in the same order as
        `self.genes` and `self.terms`.
    ancestors: list
        A list of genes and/or terms. Default: Same as the
        ``descendants`` parameter.
    sparse : bool
        If True, return a scipy.sparse matrix. If False (default),
        return a NumPy array.
    Returns
    -------
    d : np.ndarray or scipy.sparse.matrix
        A descendants-by-ancestors matrix. ``d[i,j]`` is 1 if term
        i is a descendant of term j, and 0 otherwise. Note that
        ``d[i,i]==1`` and ``d[root,i]==0``, for every i.
    """
    dist = self.shortest_paths(descendants=descendants,
                               ancestors=ancestors,
                               sparse=sparse)
    # A finite path length means the ancestor is reachable
    if sparse:
        dist.data = np.isfinite(dist.data)
        return dist
    return np.isfinite(dist)
# def get_leaves(self, terms_list, children_list=None):
# """Returns terms in ``terms_list`` that are not ancestors of any term in
# ``children_list``.
# Parameters
# ----------
# terms_list : list
# children_list : list
# If ``children_list`` is None, then select the terms in
# <terms_list> that are not ancestors of any of the other
# terms in <terms_list>.
# """
# connectivity_matrix_nodiag = self.get_connectivity_matrix_nodiag()
# terms_list = np.array(terms_list)
# if children_list is None:
# children_list = terms_list
# else:
# children_list = np.array(children_list)
# return terms_list[~ np.any(connectivity_matrix_nodiag[children_list, :][:, terms_list], axis=0)]
def propagate(self,
              direction='forward',
              gene_term=True,
              term_term=False,
              verbose=False,
              inplace=False):
    """Propagate gene-term annotations through the ontology.
    As an example, consider an ontology with one gene ``g``, three terms
    ``t1, t2, t3`` and the following connections:
    ::
    t1-->t2
    t2-->t3
    g-->t1
    g-->t2
    In "forward" propagation, a new relation ``g-->t3`` is added. In
    "reverse" propagation, the relation "g-->t2" is deleted
    because it is an indirect relation inferred from "g-->t1" and
    "t1-->t2".
    Parameters
    ----------
    direction : str
        The direction of propagation. Either 'forward' or 'reverse'
    inplace : bool
        If True, then modify the ontology. If False, then create
        and modify a copy.
    Returns
    -------
    : ddot.Ontology.Ontology
    """
    if inplace:
        ont = self
    else:
        ont = self.copy()
    assert direction in ['forward', 'reverse'], "Propagation direction must be forward or backward"
    forward = direction=='forward'
    if not forward:
        # This is needed to ensure that the pruning to a parent's
        # gene set can be based on the gene sets of its direct
        # children
        ont = ont.propagate(gene_term=gene_term, term_term=term_term, direction='forward', inplace=True)
    # Work on set-valued copies so repeated unions/differences are cheap
    if gene_term:
        term_2_gene_set = {t : set(g) for t, g in ont.term_2_gene.items()}
    if term_term:
        parent_2_child_set = {p : set(c) for p, c in ont.parent_2_child.items()}
    # # TODO: have this topological sorting be a part of the code below
    # graph = ont.to_igraph(include_genes=False, spanning_tree=False)
    # for c_idx in graph.topological_sorting(mode='in'):
    #     child = graph.vs[c_idx]['name']
    # Visit terms in a topological order so each child's set is final
    # before it is folded into (forward) or subtracted from (reverse)
    # its parents
    for child in ont.topological_sorting(top_down=forward, include_genes=False):
        for parent in ont.child_2_parent[child]:
            if gene_term:
                if forward:
                    term_2_gene_set[parent] |= term_2_gene_set[child]
                else:
                    term_2_gene_set[parent] -= term_2_gene_set[child]
            if term_term:
                if forward:
                    parent_2_child_set[parent] |= parent_2_child_set[child]
                else:
                    parent_2_child_set[parent] -= parent_2_child_set[child]
    # Write the updated sets back as the canonical list-valued mappings
    if gene_term:
        ont.gene_2_term = invert_dict(term_2_gene_set,
                                      keymap=make_index(ont.terms),
                                      valmap=dict(enumerate(ont.genes)))
        ont.term_2_gene = {a : list(b) for a, b in term_2_gene_set.items()}
    if term_term:
        ont.parent_2_child = {a : list(b) for a, b in parent_2_child_set.items()}
        ont.child_2_parent = ont._get_child_2_parent()
    ont._check_valid()
    return ont
def get_ontotype(self,
                 genotypes,
                 input_format='gene_list',
                 output_format='dataframe',
                 matrix_columns=None):
    """Transform genotypes to ontotypes.
    .. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Sharan,
    R. and <NAME>., 2016. "Translation of genotype to
    phenotype by a hierarchy of cell subsystems". *Cell
    Systems*, 2(2), pp.77-88.
    Parameters
    ----------
    genotypes : list, np.ndarray, scipy.sparse.spmatrix, pd.DataFrame
    input_format : str
        If "gene_list", then ``genotypes`` is a list of genotypes,
        where genotype is itself a list of genes mutated. Each
        gene is assumed to have a mutation value of 1.
        If 'matrix', then ``genotypes`` is a genotype-by-gene
        matrix, where the value at position (i,j) represents the
        mutation value of gene j in genotype i. ``genotypes`` can
        be a NumPy array, SciPy sparse matrix, or Pandas
        dataframe.
    output_format : str
        If 'sparse', then return a sparse matrix as a
        scipy.sparse.csr_matrix object. (default)
        If 'dataframe', then return a pandas.DataFrame object.
        If 'array', then return a numpy.ndarray object.
    matrix_columns : list
        represents a list of the genes that are represented by the
        columns of ``genotypes``. Only used when input_format is
        "matrix" and ``genotypes`` is a NumPy array or SciPy sparse
        matrix.
    Returns
    -------
    : scipy.sparse.csr_matrix, pandas.DataFrame, numpy.ndarray
        genotype-by-term matrix, where the ordering of rows and
        terms is the same as ``genotypes`` and ``self.terms``
    """
    genotypes_names = None
    if input_format=='gene_list':
        # Build the CSR arrays directly: each genotype's row holds the
        # term indices hit by its mutated genes (duplicates summed below)
        gene_2_term = {k: np.array(v) for k, v in self.gene_2_term.items()}
        genotypes_x = [np.concatenate([gene_2_term[g] for g in gset]) if len(gset)>0 else np.array([]) for gset in genotypes]
        indices = np.concatenate(genotypes_x)
        indptr = np.append(0, np.cumsum([gset.size for gset in genotypes_x]))
        data = np.ones((indices.size, ), dtype=np.int64)
        ontotypes = scipy.sparse.csr_matrix(
            (data, indices, indptr),
            (len(genotypes), len(self.terms)))
        # Genes annotated to the same term in one genotype add up
        ontotypes.sum_duplicates()
    elif input_format=='matrix':
        if isinstance(genotypes, pd.DataFrame):
            matrix_columns = genotypes.columns
            genotypes_names = genotypes.index
            genotypes = genotypes.values
        elif isinstance(genotypes, np.ndarray) or scipy.sparse.issparse(genotypes):
            assert matrix_columns is not None
        else:
            raise Exception("Parameter <genotypes> must be a genotype-by-gene matrix "
                            "represented as a Pandas dataframe, NumPy array, or SciPy sparse matrix. "
                            "Consider changing the <input_format> parameter")
        # Restrict to columns whose gene names are known to this ontology
        contained = np.array([g in self.genes_index for g in matrix_columns])
        genotypes = scipy.sparse.csc_matrix(genotypes)[:,contained]
        gene_2_term_matrix = scipy.sparse.csr_matrix(self.get_gene_2_term_matrix())
        gene_2_term_matrix = scipy.sparse.csr_matrix(gene_2_term_matrix)[contained,:]
        # (genotype x gene) . (gene x term) = (genotype x term)
        ontotypes = genotypes.dot(gene_2_term_matrix)
    else:
        raise Exception('Invalid input format')
    if output_format=='dataframe':
        ontotypes = pd.DataFrame(ontotypes.toarray(), columns=self.terms)
        if genotypes_names is not None:
            ontotypes.index = genotypes_names
    elif output_format=='sparse':
        pass
    elif output_format=='array':
        ontotypes = ontotypes.toarray()
    else:
        raise Exception('Invalid output format')
    return ontotypes
def get_gene_2_term_matrix(self):
"""Returns a gene-by-term matrix stored as a scipy.sparse.coo_matrix
Returns
-------
: scipy.sparse.coo_matrix
"""
# Convert gene names to indices
gene_2_term = [(self.genes_index[g], t_list)
for g, t_list in self.gene_2_term.items()]
gene_2_term_matrix = scipy.sparse.coo_matrix(
([1 for g, t_list in gene_2_term for t in t_list],
([g for g, t_list in gene_2_term for t in t_list],
[t for g, t_list in gene_2_term for t in t_list])),
shape=(len(self.genes), len(self.terms)))
return gene_2_term_matrix
def summary(self):
"""Summarize the Ontology's contents with respect to number of genes,
terms, and connections.
Returns
--------
: str
"""
if self.node_attr is None:
node_attr_names = []
else:
node_attr_names = self.node_attr.columns.tolist()
# node_attr_names = ', '.join(map(str, self.node_attr.columns))
if self.edge_attr is None:
edge_attr_names = []
else:
edge_attr_names = self.edge_attr.columns.tolist()
# edge_attr_names = ', '.join(map(str, self.edge_attr.columns))
summary = ('%s genes, '
'%s terms, '
'%s gene-term relations, '
'%s term-term relations'
'\nnode_attributes: %s'
'\nedge_attributes: %s') % (
len(self.genes),
len(self.terms),
sum([len(x) for x in self.gene_2_term.values()]),
sum([len(x) for x in self.parent_2_child.values()]),
node_attr_names,
edge_attr_names)
return summary
def to_ndex(self,
            ndex_user,
            ndex_pass,
            ndex_server=None,
            name=None,
            description=None,
            network=None,
            main_feature=None,
            subnet_max_term_size=None,
            visible_term_attr=None,
            layout='bubble',
            propagate='reverse',
            style=None,
            node_alias='Original_Name',
            term_2_uuid=None,
            visibility='PUBLIC',
            verbose=False):
    """Upload an Ontology object to NDEx. The Ontology can be preformatted in
    several ways including
    1. Set a name and description of the Ontology
    2. Upload a supporting gene-gene subnetwork for every term in the Ontology
    3. Propagate gene-term annotations
    4. Layout the nodes.
    5. Apply a visual style, e.g. specifying node and edge colors
    Parameters
    ----------
    name : str
        Name of Ontology
    description : str
        Description of Ontology
    layout : str
        The name of the layout algorithm for laying out the
        Ontology as a graph. Node positions are stored in the
        node attributes 'x_pos' and 'y_pos'. If None, then do not
        perform a layout.
    style : ndex.networkn.NdexGraph
        The Cytoscape.js visual style on NDEx. Represented using
        CX and stored in an NdexGraph.
    network : pandas.Dataframe
        Dataframe describing gene-gene network from which to
        create subnetworks for every term. To be passed to
        Ontology.upload_subnets_ndex().
    features : list of str
        Columns in the gene-gene network to upload. To be passed
        to Ontology.upload_subnets_ndex().
    ndex_server : str
        URL of NDEx server
    ndex_user : str
        NDEx username
    ndex_pass : str
        NDEx password
    public : bool
        Whether to make the Ontology public on NDEx
    node_alias : str
    visibility : str
    Returns
    -------
    : ndex.networkn.NdexGraph
    """
    # Optionally normalize annotations before upload ('reverse' by default)
    if propagate is not None:
        ont = self.propagate(direction=propagate, inplace=False)
    else:
        ont = self
    if ndex_server is None:
        ndex_server = ddot.config.ndex_server
    if (network is not None) and (term_2_uuid is None):
        # Select which terms get a supporting gene-gene subnetwork
        if subnet_max_term_size is None:
            terms = ont.terms
        else:
            terms = [t for t,s in zip(ont.terms, ont.term_sizes) if s <= subnet_max_term_size]
        # Only upload subnets for the unique set of the original
        # terms
        if node_alias in ont.node_attr.columns:
            orig_2_new = {a : b.index.values for a, b in ont.node_attr.loc[terms, [node_alias]].groupby(node_alias)}
            terms = [b[0] for b in orig_2_new.values()]
        term_2_uuid = ont.upload_subnets_ndex(
            network,
            main_feature,
            name,
            ndex_user,
            ndex_pass,
            ndex_server=ndex_server,
            terms=terms,
            visibility=visibility,
            verbose=verbose
        )
        # Fan the representative term's UUID back out to all its aliases
        if node_alias in ont.node_attr.columns:
            term_2_uuid = {s : term_2_uuid[orig_2_new[t][0]] for t in orig_2_new for s in orig_2_new[t] if orig_2_new[t][0] in term_2_uuid}
    elif term_2_uuid is None:
        term_2_uuid = {}
    if verbose: print('Creating NdexGraph')
    G = ont.to_NdexGraph(
        name=name,
        description=description,
        term_2_uuid=term_2_uuid,
        layout=layout,
        style=style)
    if visible_term_attr is not None:
        # Mark selected term attributes for display in the hierarchy viewer
        df = ddot.utils.nx_nodes_to_pandas(G, visible_term_attr)
        df.rename(columns=lambda x: 'Display:' + x, inplace=True)
        ddot.utils.set_node_attributes_from_pandas(G, df)
        G.set_network_attribute('Display', '|'.join(visible_term_attr))
    if verbose: print('Uploading to NDEx')
    ont_url = G.upload_to(ndex_server, ndex_user, ndex_pass, visibility=visibility)
    return ont_url, G
def to_NdexGraph(self,
                 name=None,
                 description=None,
                 term_2_uuid=None,
                 spanning_tree=True,
                 layout='bubble',
                 style=None,
                 verbose=False):
    """Formats an Ontology object into a NetworkX object with extra node
    attributes that are accessed by the hierarchical viewer.
    Parameters
    -----------
    name : str
        Name of Ontology, as would appear if uploaded to NDEx.
    description : str
        Description of Ontology, as would appear if uploaded to NDEx.
    term_2_uuid : dict
        A dictionary mapping a term to a NDEx UUID of a gene-gene
        subnetwork of genes in that term. the UUID will be stored
        in the node attribute 'ndex:internallink'. If uploaded to
        NDEx, then this attribute will provide a hyperlink to the
        gene-gene subnetwork when the term is clicked upon on the
        NDEx page for this ontology.
        This dictionary can be created using
        Ontology.upload_subnets_ndex(). Default: no dictionary.
    layout : str
        Layout the genes and terms in this Ontology. Stored in the
        node attributes 'x_pos' and 'y_pos'. If None, then do not
        perform a layout.
    Returns
    -------
    : ndex.networkn.NdexGraph
    """
    # Convert to NetworkX
    G = self.to_networkx(layout=layout, spanning_tree=spanning_tree)
    if style is None:
        style = 'passthrough'
    # Set extra attributes for passthrough visual styling
    if style=='passthrough':
        for v, data in G.nodes(data=True):
            is_gene = data[self.NODETYPE_ATTR]==self.GENE_NODETYPE
            # Only fill in defaults the caller has not already set
            if 'Vis:Shape' not in data:
                data['Vis:Shape'] = 'Rectangle' if is_gene else 'Circle'
            if 'Vis:Fill Color' not in data:
                data['Vis:Fill Color'] = '#FFFFFF'
            if 'Vis:Border Paint' not in data:
                data['Vis:Border Paint'] = '#000000'
        for u, v, data in G.edges(data=True):
            # Hide non-tree edges by default so the viewer shows a tree
            if 'Vis:Visible' not in data and 'Is_Tree_Edge' in data:
                data['Vis:Visible'] = data['Is_Tree_Edge']=='Tree'
        style = ddot.config.get_passthrough_style()
    else:
        raise Exception('Unsupported style')
    # Set links to subnetworks supporting each term
    if term_2_uuid:
        for t in self.terms:
            if t in term_2_uuid:
                # NOTE(review): G.node was removed in networkx >= 2.4;
                # this assumes an older networkx/NdexGraph API — confirm
                G.node[t]['ndex:internalLink'] = '[%s](%s)' % (G.node[t]['Label'], term_2_uuid[t])
    # # Change Original_Name to node indices
    # name_2_idx = {data['name'] : v for v, data in G.nodes(data=True)}
    # for v, data in G.nodes(data=True):
    #     if 'Original_Name' in data and 'Hidden' in data and data['Hidden']==True:
    #         data['Original_Name'] = name_2_idx[data['Original_Name']]
    G = nx_to_NdexGraph(G)
    if name is not None:
        G.set_name(name)
    if description is not None:
        G.set_network_attribute('Description', description)
    if style:
        import ndex.beta.toolbox as toolbox
        toolbox.apply_network_as_template(G, style)
    return G
def to_cx(self,
          output=None,
          name=None,
          description=None,
          term_2_uuid=None,
          spanning_tree=True,
          layout='bubble',
          style=None):
    """Serialize this Ontology to the CX (JSON) network format.

    Parameters
    ----------
    output : str or file-like
        Destination for the CX document. If None, nothing is written
        and the CX is only returned.
    name : str
        Name of the Ontology, as it would appear on NDEx.
    description : str
        Description of the Ontology, as it would appear on NDEx.
    term_2_uuid : dict
        Maps a term to the NDEx UUID of its gene-gene subnetwork; the
        UUID is stored in the 'ndex:internallink' node attribute so the
        NDEx page links each term to its subnetwork. Typically produced
        by Ontology.upload_subnets_ndex(). Default: no links.
    layout : str
        Layout applied to genes and terms; coordinates are stored in
        the 'x_pos'/'y_pos' node attributes. If None, no layout is done.

    Returns
    -------
    : CX representation as a JSON-like dictionary
    """
    # Build the NdexGraph first, then ask it for its CX form.
    ndex_graph = self.to_NdexGraph(name=name,
                                   description=description,
                                   term_2_uuid=term_2_uuid,
                                   spanning_tree=spanning_tree,
                                   layout=layout,
                                   style=style)
    cx = ndex_graph.to_cx()

    # Optionally write the document: a file-like object is used
    # directly, a string is treated as a path.
    if output is None:
        return cx
    if hasattr(output, 'write'):
        json.dump(cx, output)
    else:
        with io.open(output, 'w') as out_f:
            json.dump(cx, out_f)
    return cx
def to_graphml(self,
               output,
               layout='bubble',
               spanning_tree=True):
    """Write this Ontology in GraphML format.

    Parameters
    ----------
    output : str or file-like
        Destination for the GraphML document; a string is treated as a
        file path.
    layout : str
        Layout applied to genes and terms; coordinates are stored in
        the 'x_pos'/'y_pos' node attributes. If None, no layout is done.
    """
    # Reuse the NdexGraph conversion (an nx-compatible graph) and let
    # networkx handle the GraphML serialization.
    ndex_graph = self.to_NdexGraph(spanning_tree=spanning_tree,
                                   layout=layout)
    if hasattr(output, 'write'):
        nx.write_graphml(ndex_graph, output)
        return
    with io.open(output, 'w') as handle:
        nx.write_graphml(ndex_graph, handle)
def _force_directed_layout(self, G):
    """Force-directed (spring) layout computed on tree edges only.

    Parameters
    ----------
    G : networkx graph
        Graph whose edges carry an 'Is_Tree_Edge' attribute with values
        'Tree' / 'Not_Tree' (as produced by the Ontology conversions).

    Returns
    -------
    : dict
        Mapping of node -> [x, y], with each axis scaled to a
        500-unit extent.
    """
    # Lay out only the spanning tree: drop every non-tree edge first.
    sub_nx = G.copy()
    sub_nx.remove_edges_from([(u, v) for u, v, attr in sub_nx.edges(data=True)
                              if attr['Is_Tree_Edge'] == 'Not_Tree'])
    pos = nx.spring_layout(sub_nx, dim=2, k=None,
                           pos=None,
                           fixed=None,
                           iterations=50,
                           weight=None,
                           scale=1.0)

    xs = np.array([p[0] for p in pos.values()])
    ys = np.array([p[1] for p in pos.values()])
    x_min, x_max = xs.min(), xs.max()
    y_min, y_max = ys.min(), ys.max()

    # Scale each axis independently to a 500-unit extent.
    # Bug fix: the original divided the x coordinates by the y-extent
    # and vice versa (x_scale = 500/(y_max-y_min)), distorting the
    # aspect ratio.
    x_scale = 500. / (x_max - x_min)
    y_scale = 500. / (y_max - y_min)
    return {node: [p[0] * x_scale, p[1] * y_scale] for node, p in pos.items()}
def upload_subnets_ndex(self,
                        network,
                        main_feature,
                        name,
                        ndex_user,
                        ndex_pass,
                        ndex_server=None,
                        terms=None,
                        gene_columns=['Gene1', 'Gene2'],
                        propagate='forward',
                        visibility='PUBLIC',
                        node_attr=None,
                        node_alias='Original_Name',
                        z_score=False,
                        spring_feature=None, spring_weight=1.0,
                        edge_groups=None,
                        max_num_edges=-1,
                        verbose=False):
    """For each term in the ontology, upload a subnetwork of interactions
    between the genes in that term to NDEx.

    Parameters
    ----------
    network : pandas.DataFrame
        Edge table containing the two gene columns plus one or more
        feature columns (must include `main_feature`).
    main_feature : str
        Feature column used to filter/truncate edges; also recorded as
        the 'Main Feature' network attribute on NDEx.
    name : str
        Prefix for the names of all uploaded subnetworks.
    ndex_user : str
        NDEx username.
    ndex_pass : str
        NDEx password.
    ndex_server : str
        URL of the NDEx server. Default: ddot.config.ndex_server.
    terms : list
        Terms to upload subnetworks for. Default: all terms.
    gene_columns : list
        The two columns of `network` that hold the interacting genes.
    propagate : str
        Direction ('forward' or 'reverse') to propagate gene-term
        annotations with Ontology.propagate() before computing each
        term's gene set. If falsy, annotations are not propagated.
    visibility : str
        NDEx visibility of the uploaded networks (e.g. 'PUBLIC').
    node_attr : pandas.DataFrame
        Optional per-node attributes copied onto each subnetwork.
    node_alias : str
        Column of the ontology's node_attr used to rename genes when
        present.
    z_score : bool
        If True, convert every feature column into z-scores.
    spring_feature : str
        Edge attribute used as the spring weight for a precomputed
        layout. If None, no layout is computed before upload.
    spring_weight : float
        Currently unused (kept for backward compatibility).
    edge_groups : dict
        Mapping of group name -> list of members, serialized into the
        'edge groups' network attribute.
    max_num_edges : int
        If > 0, keep at most this many highest-scoring (by
        `main_feature`) edges, both globally and per subnetwork.
    verbose : bool
        Print per-term progress information.

    Returns
    -------
    : dict
        Mapping of term -> NDEx UUID of its uploaded subnetwork.
    """
    if propagate:
        ont = self.propagate(direction=propagate, inplace=False)
    else:
        ont = self

    if ndex_server is None:
        ndex_server = ddot.config.ndex_server
    # NOTE(review): this client object is never used below; presumably
    # it exists to fail fast on bad credentials -- confirm.
    ndex = nc.Ndex(ndex_server, ndex_user, ndex_pass)

    term_2_uuid = {}

    g1, g2 = gene_columns[0] + '_lex', gene_columns[1] + '_lex'
    features = [f for f in network.columns if (f not in gene_columns)]
    assert main_feature in features, 'A main feature of the network must be specified'

    network = network[features + gene_columns].copy()
    network[gene_columns[0]] = network[gene_columns[0]].astype(str)
    network[gene_columns[1]] = network[gene_columns[1]].astype(str)

    # Keep only gene pairs where both genes belong to the ontology.
    genes_set = set(ont.genes)
    tmp = [x in genes_set and y in genes_set
           for x, y in zip(network[gene_columns[0]], network[gene_columns[1]])]
    network = network.loc[tmp, :]

    if z_score:
        for feat in features:
            network[feat] = network[feat].astype(np.float64)
        # Normalize features into z-scores
        tmp = network[features]
        network[features] = (tmp - tmp.mean()) / tmp.std()

    # Classify each feature's dtype and record its min/max; these are
    # stored later as network attributes on NDEx.
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']

    def _classify_dtype(x):
        # Map a pandas dtype to 'numeric'/'boolean'; any other dtype is
        # unsupported.
        if str(x) in numerics:
            return 'numeric'
        elif str(x) == 'bool':
            return 'boolean'
        else:
            raise Exception()

    feature_types = network[features].dtypes.map(_classify_dtype)
    # (Fix: np.str was removed from NumPy; the builtin str is equivalent.)
    feature_mins = network[features].min().astype(str)
    feature_maxs = network[features].max().astype(str)

    # Set an upper limit on the total number of edges uploaded to NDEx,
    # keeping the highest-scoring ones.
    if max_num_edges > 0:
        network.sort_values(by=main_feature, ascending=False, inplace=True)
        network = network.iloc[:max_num_edges, :]

    # Lexicographically sort gene1 and gene2 so that gene1 < gene2,
    # making (gene1, gene2) a canonical key for each pair.
    network[g1], network[g2] = zip(
        *[(x, y) if x < y else (y, x) for x, y in zip(network[gene_columns[0]], network[gene_columns[1]])])
    network_idx = {x: i for i, x in enumerate(zip(network[g1], network[g2]))}

    if terms is None:
        terms = ont.terms

    if verbose: print('Uploading %s terms' % len(terms))

    for upload_idx, t in enumerate(terms):
        start = time.time()

        # Genes annotated to this term. Bug fix: the original read
        # `genes` inside the alias branch before it was ever assigned,
        # raising NameError on the first iteration (or silently reusing
        # the previous term's genes).
        genes = [ont.genes[g] for g in ont.term_2_gene[t]]
        if node_alias in ont.node_attr.columns:
            genes = list(ont.node_attr.loc[genes, node_alias].values)
        genes.sort()
        gene_pairs_idx = [network_idx[gp] for gp in itertools.combinations(genes, 2) \
                          if gp in network_idx]

        # Smallest positive 'Parent weight' among this term's children,
        # or -1 when unavailable.
        children = ont.parent_2_child[t]
        min_children_term_weights = -1
        if ('Parent weight' in ont.node_attr.columns.tolist()) and (len(children) > 0):
            children_term_weights = []
            for c in children:
                if ont.node_attr.loc[c, 'Parent weight'] > 0:
                    children_term_weights.append(ont.node_attr.loc[c, 'Parent weight'])
            if len(children_term_weights):
                children_term_weights = np.array(children_term_weights)
                min_children_term_weights = np.min(children_term_weights)

        if len(gene_pairs_idx) > 0:
            network_sub = network.iloc[gene_pairs_idx, :]
            # NOTE(review): assumes ont.node_attr always has a
            # 'Parent weight' entry for every term with data -- confirm.
            network_sub = network_sub.loc[network_sub[main_feature] >= ont.node_attr.loc[t, 'Parent weight']]

            # Per-subnetwork edge cap, again keeping the highest-scoring.
            if max_num_edges is not None and max_num_edges > 0:
                network_sub.sort_values(by=main_feature, ascending=False, inplace=True)
                network_sub = network_sub.iloc[:max_num_edges, :]

            # NOTE(review): nx.from_pandas_dataframe and
            # nx.weakly_connected_component_subgraphs (below) only exist
            # in networkx < 2.x / < 2.4 -- this module appears pinned to
            # an old networkx.
            G_nx = nx.from_pandas_dataframe(network_sub, g1, g2,
                                            edge_attr=features)
            if node_attr is not None:
                set_node_attributes_from_pandas(G_nx, node_attr)
            # Include genes with no qualifying edges as isolated nodes.
            G_nx.add_nodes_from(list(set(genes) - set(G_nx.nodes())))

            # Annotate each gene's membership in the children terms as
            # boolean 'Group:<child>' node attributes.
            df = pd.DataFrame({c: None for c in children}, index=genes, dtype=bool)
            for c in children:
                genes_in = [ont.genes[g] for g in ont.term_2_gene[c]]
                df.loc[genes_in, c] = True
            df.rename(columns=lambda x: 'Group:' + x, inplace=True)
            ddot.utils.set_node_attributes_from_pandas(G_nx, df)

            G = nx_to_NdexGraph(G_nx)
            G.set_name('%s supporting network for %s' % (name, t))
            G.set_network_attribute('Description', '%s supporting network for %s' % (name, t))
            G.set_network_attribute('Main Feature', main_feature)
            for f in features:
                if (f == spring_feature) and (f != main_feature):
                    continue
                G.set_network_attribute('%s type' % f, feature_types[f])
                if feature_types[f] == 'numeric':
                    G.set_network_attribute('%s min' % f, feature_mins[f])
                    G.set_network_attribute('%s max' % f, feature_maxs[f])
            G.set_network_attribute('Group', '|'.join(children))

            # Score threshold / weight attributes for this subnetwork.
            G.set_network_attribute('Main Feature Default Cutoff', float(ont.node_attr.loc[t, 'Parent weight']))
            G.set_network_attribute('Parent weight', float(ont.node_attr.loc[t, 'Parent weight']))
            if min_children_term_weights > 0:
                G.set_network_attribute('Children weight', '|'.join(['{:.3f}'.format(w) for w in children_term_weights]))

            if isinstance(edge_groups, dict) and (len(edge_groups.keys()) > 0):
                edge_group_string = []
                for k, vs in edge_groups.items():
                    vs.sort()
                    edge_group_string.append(','.join([k] + vs))
                edge_group_string = '|'.join(edge_group_string)
                G.set_network_attribute('edge groups', edge_group_string)

            # Only keep the biggest connected component in the network.
            G = max(nx.weakly_connected_component_subgraphs(G), key=len)

            # Optionally precompute a spring layout, weighted by
            # spring_feature, so NDEx displays a sensible arrangement.
            if spring_feature is not None:
                gsim = layouts._create_simple_graph(G)
                pos = nx.spring_layout(gsim, scale=200 * math.sqrt(gsim.number_of_nodes()), weight=spring_feature)
                G.pos = pos

            start_upload = time.time()
            ndex_url = G.upload_to(ndex_server, ndex_user, ndex_pass, visibility=visibility)
            term_2_uuid[t] = parse_ndex_uuid(ndex_url)
            upload_time = time.time() - start_upload

            if verbose:
                print(upload_idx,
                      'Term:', t,
                      'Gene pairs:', len(G_nx.edges()),
                      'Genes:', len(genes),
                      'Time:', round(time.time() - start, 4),
                      'Upload time:', round(upload_time, 4),
                      'NDEx URL:', ndex_url)
        else:
            if verbose:
                print(upload_idx, 'No data provided for gene pairs in Term: %s' % t)

    return term_2_uuid
def get_best_ancestors(self, node_order=None, verbose=False, include_genes=True):
    """Compute the 'best' common ancestor for every pair of terms.

    'Best' is specified by a ranking of terms. For example, if terms
    are ranked by size, from smallest to largest, then the smallest
    common ancestor is calculated.

    Parameters
    ----------
    node_order : list
        A list of terms, ordered by their rank with the 'best' term at
        the beginning. Default: terms sorted from smallest to largest.
    verbose : bool
        Print progress information.
    include_genes : bool
        Include genes as nodes of the graph, so gene-gene and
        gene-term pairs also receive a best ancestor.

    Returns
    --------
    ancestors : np.ndarray
        ancestors[a,b] = the best common ancestor of terms a and
        b, represented as a 0-based index of self.terms
    nodes : list
        List of the row and column names. Rows and columns are the
        same.
    """
    # Propagate annotations down so descendant relations are explicit.
    ont = self.propagate(direction='reverse', inplace=False)
    graph = ont.to_igraph(include_genes=include_genes, spanning_tree=False)
    if node_order is None:
        # By default, sort from smallest to largest terms
        node_order = [self.terms[t] for t in np.argsort(ont.term_sizes)]

    # d[a, b] == 1 iff b is reachable from a (i.e. b is an ancestor of
    # a, including a itself: shortest_paths returns 0 on the diagonal).
    d = np.int8(np.isfinite(np.array(graph.shortest_paths(graph.vs, graph.vs, mode='out'), order='C')))

    # -1 marks pairs whose best ancestor has not been assigned yet.
    ancestor_matrix = np.zeros(d.shape, dtype=np.int32)
    ancestor_matrix.fill(-1)
    if verbose: time_print('Iterating:')
    # Visit terms from best to worst; each pair of descendants keeps
    # the FIRST (best-ranked) common ancestor that covers it, because
    # later iterations only overwrite cells still equal to -1.
    for t in node_order:
        i = graph.vs.find(t).index
        t_i = self.terms_index[t]

        # Note: includes self as a child
        children = np.where(d[:,i] == 1)[0]

        # For those descendants without a computed LCA yet, set their LCA to this term
        lca_sub = ancestor_matrix[children.reshape(-1,1), children]
        lca_sub[lca_sub == -1] = t_i
        ancestor_matrix[children.reshape(-1,1), children] = lca_sub

    # Check symmetry (a common ancestor is symmetric in its pair).
    assert (ancestor_matrix.T == ancestor_matrix).all()
    # Every pair must be covered; otherwise the DAG has multiple roots.
    assert (-1 == ancestor_matrix).sum() == 0, 'The ontology may have more than one root'

    return ancestor_matrix, graph.vs['name']
@classmethod
def _make_tree_igraph(self,
                      graph=None,
                      method='priority',
                      edge_name='smallest_parent',
                      parent_priority=None, edge_priority=None, default_priority=None, optim='max'):
    """Return a copy of `graph` with a boolean edge attribute
    (`edge_name`) marking a spanning tree: for every vertex, exactly
    one edge to its best-ranked parent is flagged True.

    Exactly one of `parent_priority` (per-vertex rank) or
    `edge_priority` (per-edge rank, with `default_priority` as the
    fallback) must be given; `optim` selects 'max' or 'min' as best.
    """
    if graph is None:
        graph = self.to_igraph(include_genes=False, spanning_tree=True)

    if method != 'priority':
        raise Exception('Method not supported')

    # Exactly one ranking scheme may be supplied.
    assert (parent_priority is not None) + (edge_priority is not None) == 1
    if edge_priority is not None:
        assert default_priority is not None

    # Resolve the optimizer name to the builtin function.
    if optim == 'min':
        optim = min
    elif optim == 'max':
        optim = max

    graph.es[edge_name] = False
    for vertex in graph.vs:
        candidates = graph.neighbors(vertex.index, mode='out')
        if len(candidates) == 0:
            continue
        # Pick the parent with the best-valued priority.
        if parent_priority is not None:
            rank = lambda p: parent_priority[p]
        else:
            rank = lambda p: edge_priority.get(graph.get_eid(vertex.index, p), default_priority)
        chosen = optim(candidates, key=rank)
        graph.es[graph.get_eid(vertex.index, chosen)][edge_name] = True

    return graph
def to_pickle(self, file, compression='infer'):
    """Save this Ontology object with the Python pickle protocol.

    Parameters
    ----------
    file : str or file-like
        Destination path (or buffer) for the pickled object.
    compression : str
        Compression scheme, inferred from the file extension by
        default (passed through to pandas).
    """
    # Use the public pandas API; pandas.io.pickle is a private module
    # whose layout is not guaranteed across pandas versions.
    pandas.to_pickle(self, file, compression=compression)
@classmethod
def read_pickle(cls, file, compression='infer'):
    """Load an Ontology object from a pickled state.

    Parameters
    ----------
    file : str or file-like
        Source path (or buffer) of the pickled object.
    compression : str
        Compression scheme, inferred from the file extension by
        default (passed through to pandas).

    Returns
    -------
    : the unpickled object (an Ontology when written by to_pickle)
    """
    # Use the public pandas API; pandas.io.pickle is a private module
    # whose layout is not guaranteed across pandas versions.
    return pandas.read_pickle(file, compression=compression)
def __repr__(self):
    # Interactive echo: delegate to summary() for a human-readable
    # description of the ontology.
    return self.summary()
def __str__(self):
    """String conversion: delegate to :meth:`summary`."""
    return self.summary()
|
en
| 0.679411
|
Collapses a node in a Graph (igraph package) while preserving long-range hierarchical relations between descendants and ancestral nodes. # A faster collapse that adds all new edges # simultaneously. Ignores edge attributes # Only add an edge if it doesn't already exist # Set default value of edge attributes to 0 # Update attributes Parses an alignment file created from alignOntology's calculateFDRs script Parameters ----------- f : str Filename of alignment file source : str Indicates which ontology will be the index of the returned pandas.DataFrame. Value must be either 'Term_1' (first ontology) or 'Term_2' (second ontology) Returns -------- : pandas.DataFrame DataFrame with four columns: 'Term', 'Similarity', 'FDR', and 'Size'. The index of the DataFrame are the names of terms in the "source" ontology. # Five columns in the input file # 1) Term from first "computed" ontology # 2) Term from second "reference" ontology # 3) Similarity value # 4) FDR # 5) Size of the term in the first ontology #assert os.path.isdir(ddot.config.alignOntology) #calculateFDRs = os.path.join(ddot.config.alignOntology, 'calculateFDRs') # Kill the process # Change index to terms in hier1 # Change index to terms in hier2 Parses an OBO file and writes the results into several tables. Parameters ---------- obo : str Filename of OBO file output_file : str Filename to write table that describes the ontology's hierarchical structure. The table has four columns: (1) parent term, (2) child term, (3) relation type (e.g. "is_a" or "part_of"), (4) namespace of relation (e.g. "biological_process" or "cellular component") id2name_file : str Filename to write table of term descriptions. The table has two columns: (1) Ontology term (e.g. "GO:0000030"), (2) description (e.g. "mannosyltransferase activity") id2namespace_file : str Filename to write table of term namespaces. The table has two columns: (1) Ontology term (e.g. "GO:0000030"), (2) namespace of the term (e.g. 
"biological_process") alt_id_file : str Filename to write table of alternative Term IDs that are synonyms and refer to the same term. The table has two columns: (1) Primary Term ID, (2) Alternative Term ID ## Keywords that screw up parsing: # import, is_anonymous, intersection_of, union_of ## Relations # 'is_a:' # 'relationship: has_part' # Not in filtered GO # 'relationship: occurs_in' # Not in filtered GO # 'relationship: part_of' # 'relationship: positively_regulates' # 'relationship: negatively_regulates' # 'relationship: regulates' # 'relationship: results_in' # Not in filtered GO # Remove comments # Add last stanza if it was a term stanza. Include namespace. # Start new term stanza # Set the default namespace, if it exists # In a term stanzo or not #if 'alt_id:' in line: assert False Read gene-term annotations from GAF file format: http://geneontology.org/page/go-annotation-file-gaf-format-21 Parameters ---------- gaf : str Filename of GAF file Returns -------- A list of 2-tuples (gene, GO term) # Check that all annotations are to UniProtKB protein IDs # assert df['DB'].unique().size == 1 and df['DB'].unique()[0]=='UniProtKB' # Remove annotations that have a NOT qualifier # return df.loc[:, ['DB Object ID', 'GO ID']].values.tolist() A Python representation for constructing, analyzing, and manipulating the hierarchical structure of ontologies. An Ontology object contains the following attributes for representing the hierarchical structure. Do not directly modify these attributes. Parameters ---------- genes : list Names of genes terms : list Names of terms gene_2_term : dict gene_2_term[<gene>] --> list of terms connected to <gene>. Terms are represented as their 0-based index in self.terms. term_2_gene : dict term_2_gene[<term>] --> list of genes connected to <term>. Genes are represented as their 0-based index in self.genes. 
child_2_parent : dict child_2_parent[<child>] --> list of the parent terms of <child> parent_2_child : dict parent_2_child[<parent>] --> list of the children terms of <parent> term_sizes : list A list of every term's size, i.e. the number of unique genes that it and its descendant terms contain. This list has the same order as self.terms. It holds that for every i, `term_sizes[i] = len(self.term_2_gene[self.terms[i]])` Construct an Ontology object. Parameters ---------- hierarchy : list, tuple Iterable of (child term, parent term). E.g. list of 2-tuples mapping : list, tuple Iterable of (gene, term) pairs. E.g. list of 2-tuples edge_attr : pandas.DataFrame Meta-data describing (child_term, parent_term) pairs. Suggestion: The index of the DataFrame must be a pandas.MultiIndex, where the first level is the child term and the second level is the parent term. parent_child : bool If True, then the definitions of <hierarchy> and <mapping> are reversed so that they iterate over (parent term, child term) and (term, gene) pairs. propagate : None, str The direction ('forward' or 'reverse') to propagate gene-term annotations up the hierarchy with Ontology.propagate(). If None, then don't propagate annotations. add_root_name : bool The name of an artificial root. If there are multiple roots in the ontology, then they are joined into one root with this name. Default: Don't create this root. 
ignore_orphan_terms : bool # Cast all node names to strings ## Read term-to-term edges # parent_2_child[<term_name>] --> list of <term_name>'s children terms ## Read gene-to-term edges # self.gene_2_term[<gene_name>] --> list of terms that <gene_name> is mapped to ## Check that the set of terms is the same according to ## parent_2_child and self.gene_2_term # if verbose and len(terms_A - terms_B)>0: # print 'WARNING: {} terms connected to other terms but not to genes'.format(len(terms_A - terms_B)) ## terms_index[<term_name>] --> index in self.terms ## self.genes_index[<gene_name>] --> index in self.genes ## Convert self.gene_2_term to list term indices rather than term names # if verbose: # print("Changing node_attr index name from %s to 'Node'" % node_attr.index.name) # # import traceback # # print traceback.print_stack() # if 'Child' in edge_attr.index.names and 'Parent' in edge_attr.index.names: # edge_attr.index = edge_attr.index[['Child', 'Parent']] # else: # edge_attr.index.names = ['Child', 'Parent'] # if edge_attr.index.names != ['Child', 'Parent']: # if verbose: # print("Changing edge_attr index names from %s to ['Child', 'Parent']" % edge_attr.index.names) # edge_attr.index.names = ['Child', 'Parent'] # ## Not necessary and requires extra start-up time (perhaps set as a __init__ parameter to precalculate many things) # empty_terms = sum([x==0 for x in self.term_sizes]) # if verbose and empty_terms > 0: # print 'WARNING: {} terms are connected to other terms but not to genes'.format(empty_terms), [t for t, x in zip(self.terms, self.term_sizes) if x==0][:5] # # import traceback # # print traceback.print_stack() Check if there is a single unifying root term of the ontology. If not, then identify the multiple roots and join them under an artificial root. Converts self.parent_2_child to child_2_parent # child_2_parent[<term_name>] --> list of <term_name>'s parent term names Resets the node attributes to be empty. Resets the edge attributes to be empty. 
Update existing node attributes or add new node attributes. Parameters ---------- node_attr : pandas.DataFrame Dataframe where index are the names of genes or terms and where the columns are the names of node attributes. #### # TODO : make sure that renaming/deleting/collapsing of genes and columns respect the node_attr and edge_attr # Filter for genes and terms in the ontology # Update index to the union of current and new node_attr # Update columns Update existing edge attributes or add new edge attributes. Parameters ---------- edge_attr : pandas.DataFrame Dataframe where the index is a MultiIndex represents edges in the Ontology, such that the first level is the name of a gene or child term, and the second level is the name of a parent term. Columns are the names of edge attributes. # Filter for genes and terms in the ontology # Update index # Update values for overlapping columns Returns a list of the root term(s). Returns ------- : list For each term T in the ontology, create a new dummy term that indirectly connect T's to T. For example, if g1 and g2 are in T, then a new term dummy_T is created so that the new ontology consists of g1 --> T_dummy g2 --> T_dummy T_dummy --> T Parameters ---------- tree_edges : list List of (child, parent) edges that constitute a spanning tree of the ontology. If specified, then for each term T, only the genes that are connected to T in the spanning tree will be re-routed to the dummy node. Default: None. This restriction will not apply Returns ------- : ddot.Ontology.Ontology Creates intermediate duplicate nodes ## Gene-term connections ## Parent-child term connections ################################################## # Set Original_Name and Size for Duplicate Nodes # # For duplicate nodes, set the Original_Name attribute to the name of the original node # Set the 'Size' attribute of duplicate nodes to be the 'Size' # of the original node. 
If the original node is a term with no # 'Size' attribute, then set 'Size' to be the number of genes # in the term # Append attributes for the new nodes # Used for pandas version >= 0.23 ######################################## # Set Label and Size for collect nodes # ######################################## Traverses the ontology from the root to the leaves while duplicating nodes during the traversal to create a tree representation. Traverse the ontology from the root nodes to the leaves in a breadth-first manner. Each time a node is traversed, then create a duplicate of it Parameters ---------- duplicate : list Nodes to duplicate for unfolding. Default: all genes and terms genes_only : bool If True, then duplicate all of the genes and none of the terms. Default: False levels : # Manual bfs ################################# ### Add nodes and node attributes # Ensure that all 'Size' values are the same numeric type # Identify the root # Set the node attribute 'Label'. If the node has a "Original # Name" attribute, indicating that it is a duplicate, then use # that. Otherwise, use the node's name. ################################# ### Add edges and edge attributes # Assume a list of tree edges are supplied ## Remove collector nodes # Need fast special delete # Set the original term sizes for the original copy of # each term (not the duplicates) ###################################################### # TODO: move this visual styling outside of the layout # functionality # if 'collect_hidden' in v and 'is_collect_node' in data and data['is_collect_node']: # for u in G.predecessors(v): # G.node[u]['Vis:Fill Color'] = '#3182BD' Converts Ontology into a NetworkX object. Parameters ---------- node_attr : pandas.DataFrame Meta-data about genes and terms that will be included as node attributes in the NetworkX object. edge_attr : pandas.DataFrame Meta-data about connections among genes and terms that will be included as edge attributes in the NetworkX object. 
spanning_tree : bool If True, then identify a spanning tree of the DAG. include an edge attribute "Is_Tree_Edge" that indicates layout : str The name of the layout algorithm for laying out the Ontology as a graph. Node positions are astored in the node attributes 'x_pos' and 'y_pos'. If None, then do not perform a layout. Returns ------- : nx.DiGraph # Assume a list of tree edges are supplied ## Remove collector nodes # Need fast special delete # Set the original term sizes for the original copy of # each term (not the duplicates) ###################################################### # TODO: move this visual styling outside of the layout # functionality # if 'collect_hidden' in v and 'is_collect_node' in data and data['is_collect_node']: # for u in G.predecessors(v): # G.node[u]['Vis:Fill Color'] = '#3182BD' Create Ontology from a tab-delimited table or pandas DataFrame. Duplicate gene-term or term-term connections in the table are removed. Parameters ---------- table : pandas.DataFrame, file-like object, or filename A table that lists (child term, parent term) pairs. If mapping==None, then this table should also include (gene, term) pairs. parent : int or str Column for parent terms in table (index or name of column) child : int or str Column for child terms and genes in table (index or name of column) is_mapping : function A function that is applied on each row and returns True if the row represents a (gene, term) pair and False otherwise. This function is only applied when a separate table of (gene, term) pairs is not specified, i.e. mapping==None. The default function is `lambda row: row[2]=={0}` which tests if the third column equals the string "{0}". 
mapping : pandas.DataFrame, file-like object, or filename (optional) A separate table listing only (gene, term) pairs mapping_parent : int or str Column for terms in mapping table (index or name of column) mappping_child : int or str Column for genes in mapping table (index or name of column) header : int or None Row number to use as the column names, which are then stored in the resulting Ontology object's `edge_attr` field. For example if `header=0` (default), then the first row is assumed to be column names. If `header=None`, then no column names are assumed. propagate : None or str The direction ('forward' or 'reverse') for propagating gene-term annotations up the hierarchy with Ontology.propagate(). If None, then don't propagate annotations. clixo_format : bool If True, The table is assumed to be in the same format produced by the CLIXO C++ implementation. In particular, table has three columns: Column 1) Parent Term Column 2) Child Term or Gene Column 3) The string "gene" if the row is a gene-term mapping, otherwise the string "default". The table is also assumed to have no column headers (i.e. header=False) clear_default_attr: bool If True (default), then remove the edge attribute 'EdgeType' created using Ontology.to_table(). This attribute was created to make the table be an equivalent representation of an Ontology object; however, it is no longer necessary after reconstructing the Ontology object. Returns ------- : ddot.Ontology.Ontology # print('WARNING: no gene-term connections ' # 'were specified by the is_mapping ' # 'function or separate table. ' # 'Default: assume a gene-term connection when the 3rd column equals %s' % cls.GENE_TERM_EDGETYPE) # Read table # Extract gene-term connections from table # Read separate table of gene-term connections # Used for pandas version >= 0.23 Creates an Ontology object from a linkage matrix created by scipy's hierarchical/agglomerative clustering. Note that this form of clustering produces a binary tree. 
Reads an Ontology stored on NDEx. Gene and terms are distinguished according by an edge attribute. Parameters ---------- ndex_uuid : str NDEx UUID of ontology edgetype_attr : str Name of the edge attribute that distinguishes a (gene, term) pair from a (child term, parent term) pair gene_value : str Value of the edge attribute for (gene, term) pairs Returns ------- : ddot.Ontology.Ontology Converts a NdexGraph object to an Ontology object. Gene and terms are distinguished by an edge attribute. Parameters ---------- G : NdexGraph edgetype_attr : str Name of the edge attribute that distinguishes a (gene, term) pair from a (child term, parent term) pair gene_value : str Value of the edge attribute for (gene, term) pairs Returns ------- : ddot.Ontology.Ontology Converts a NetworkX object to an Ontology object. Gene and terms are distinguished by an edge attribute. Parameters ---------- G : nx.DiGraph edgetype_attr : str Name of the edge attribute that distinguishes a (gene, term) pair from a (child term, parent term) pair edgetype_value : str Value of the edge attribute for (gene, term) pairs clear_default_attr : bool If True (default), then remove the node and edge attributes that are created in a NetworkX graph using Ontology.to_networkx() or Ontology.to_ndex(). These attributes include 'Label', 'Size', 'NodeType', and 'EdgeType'. These attributes were created to make the NetworkX graph be an equivalent representation of an Ontology object; however, they are no longer necessary after reconstrcting the Ontology object. Returns ------- : ddot.Ontology.Ontology Converts a igraph Graph object to an Ontology object. Gene and terms are distinguished by an edge attribute. 
Parameters ---------- G : igraph.Graph edgetype_attr : str Name of the edge attribute that distinguishes a (gene, term) pair from a (child term, parent term) pair edgetype_value : str Value of the edge attribute for (gene, term) pairs Returns ------- : ddot.Ontology.Ontology Remove redundant and empty terms. When a term T is removed, hierarchical relations are preserved by connecting every child of T with every parent of T. This removal operation has the nice property of being commutative, i.e. the order of removal does not matter. Parameters ----------- method : str If "mhkramer", then use the collapseRedundantNodes script in the alignOntology package. If "python", then use an internal Python script. min_term_size : int Remove terms that are below this size. TODO: not yet supported Returns ------- : ddot.ddot.Ontology A new Ontology object # Propagate forward and then reverse # assert os.path.isdir(ddot.config.alignOntology) # collapseRedundantNodes = os.path.join(ddot.config.alignOntology, 'collapseRedundantNodes') # print('to_collapse:', sorted(to_collapse)) Collapses two ontologies to the common set of genes. Parameters ----------- ont1 : ddot.Ontology.Ontology ont2 : ddot.Ontology.Ontology Returns ------- ont1_collapsed : ddot.Ontology.Ontology ont2_collapsed : ddot.Ontology.Ontology # Used for pandas version >= 0.23 # orig_sizes = pd.DataFrame({'Original_Size' : self.term_sizes}, index=self.terms) # ont.update_node_attr(orig_sizes) # if len(new_connections)>0: # summary_sizes = pd.DataFrame({'Original_Size' : [int(x.split('_')[1]) for x in new_nodes]}, index=new_nodes) # ont.update_node_attr(summary_sizes) Delete genes and/or terms from the ontology. Parameters ---------- to_delete : array-like (optional) Names of genes and/or terms to delete. Either to_delete or to_keep must be specified. to_keep : array-like (optional) Names of genes and/or terms to keep; all other genes/terms are delete. Only used if to_delete is not specified. 
preserve_transitivity : bool If True, then maintain transitive relations when deleting terms. For example, if the hierarchical structure consists of geneA --> term1 term1 --> term2 term2 --> term3 term2 --> term4 then deleting term2 will result in the structure: geneA --> term1 term1 --> term3 term3 --> term4 If False, then deleting term2 will result in a disconnected structure: geneA --> term1 inplace : bool If True, then modify the ontology. If False, then create and modify a copy. Returns ------- : ddot.Ontology.Ontology # Update node/edge attributes Rename gene and/or term names. Parameters ---------- genes : dict or function If dictionary, then it maps current gene names to new names. Genes not in dictionary are deleted. If function, then genes(name) returns the new name. terms : dict or function If dictionary, then it maps current term names to new names. Terms not in dictionary are deleted. If function, then terms(name) returns the new name. inplace : bool If True, then modify the ontology. If False, then create and modify a copy. Returns ------- : ddot.Ontology.Ontology # Retain a unique set of term names # Remove identities # Update node attributes # Update edge attributes Convert Ontology to a table representation. Return a pandas.DataFrame and, optionally, write it to a file as a tab-delimited file. Parameters ---------- output : filepath or file-like File to write table. If None, then only return a pandas.DataFrame term_2_term : bool Include (child term, parent term) pairs gene_2_term : bool Include (gene, term) pairs edge_attr : array-like or bool List of extra edge attributes to include. If True, then include all attributes. If False, then don't include any attribute. header : bool If True (default), then write the column names as the first row of the table. parent_child : bool If True, then the first column is the parent term and the second column is the child term or gene. If False, then the columns are reversed. 
clixo_format : bool If True, the table is the same format used the CLIXO C++ implementation. In particular, the table has three columns: Column 1) Parent Term Column 2) Child Term or Gene Column 3) The string "gene" if the row is a gene-term mapping, otherwise the string "default". Returns ------- : pandas.DataFrame Contains at least three columns: (1) "Parent", (2) "Child", and (3) "EdgeType". Create a deep copy of the Ontology object Flatten the hierarchy into a node-node similarity matrix by calculating a similarity between pair of genes in `genes_subset`. Currently, only the Resnik semantic similarity measure is implemented. Parameters ----------- include_genes : bool If True, then calculate pairwise similarities between genes. If `include_terms` is also True, then also calculate similarities between genes and terms. include_terms : bool If True, then calculate pairwise similarities between terms. If `include_genes` is also True, then also calculate similarities between genes and terms. similarity : str Type of semantic similarity. (default: "Resnik") The Resnik similarity s(g1,g2) is defined as :math:`-log_2(|T_{sca}| / |T_{root}|)` where :math:`|T|` is the number of genes in `genes_subset` that are under term T. :math:`T_{sca}` is the "smallest common ancestor", the common ancestral term with the smallest term size. :math:`T_{root}` is the root term of the ontology. <NAME>. (1999). Semantic similarity in a taxonomy: An information-based measured and its application to problems of ambiguity in natural language. <NAME>. Intell. Res. 11,95-130. Returns ------- : (sim, nodes) A 2-tuple consisting of `sim`, a node-by-node NumPy array, and `nodes`, a NumPy array of the node names in `sim`. Return the common ancestors of a set of genes Parameters ---------- nodes : list List of nodes (genes and/or terms) to find the common ancestors min_nodes : str or int If 'all', then return only terms that contain all of the input genes. 
If an integer, then return only terms that contain at least <nodes> of the input genes minimal : bool If True, then do NOT return the terms that are themselves ancestors of the other common ancestors. This filter leaves only the 'minimal' set of common ancestors. Returns ------- : list List of common ancestors Returns an array of term sizes in the same order as self.terms # gene_2_term = self._propagate_forward() Shuffle the names of genes Identify a spanning tree of the DAG (including genes as part of the DAG). Parameters ------------ ret : str If 'edges', then return a list of (u, v) edges in the tree. If 'ontology', return an Ontology object consisting of only the tree edges. Returns ------- : array-like or Ontology Return True if the Ontology is a valid directed acyclic graph, False otherwise. Perform a topological sorting. top_down : If True, then ancestral nodes (e.g. the root nodes) come before descendants in the sorting. If False, then reverse the sorting Convert Ontology to an igraph.Graph object. Gene and term names are stored in the 'name' vertex attribute of the igraph object. Parameters ---------- include_genes : bool Include genes as vertices in the igraph object. spanning_tree : bool If True, then identify a spanning tree of the DAG. include an edge attribute "Is_Tree_Edge" that indicates Returns ------- : igraph.Graph # Identify spanning tree Calculate the length of the shortest paths from descendant nodes to ancestor nodes. Parameters ---------- sparse : bool If True, return a scipy.sparse matrix. If False, return a NumPy array weights : dict Dictionary mapping (child term, parent term) or (gene, term) edges to weights. Any edge with no given weight is assigned a weight of 0 by default. (default) If weights is None, then a uniform weight is assumed. chunk_size : int (optional) Computational optimization: shortest paths are calculated in batches. 
Returns ------- d : np.ndarray or scipy.sparse.spmatrix d[x,y] is the length of the shortest directed path from a descendant node x to ancestor node y. d[x,y]==numpy.inf if no directed path exists. The rows are in the same order as <descendants>, and the columns are in the same order as <ancestors>. # Assume dictionary # TODO: when ancestors are specified, the results become negative Computes the lengths of the longest directed paths between all pairs of terms. Returns ------- d : np.ndarray or scipy.sparse.spmatrix d[x,y] is the length of the longest directed path from a descendant term with index x to an ancestral term with index y, where indices are defined by self.terms. d[x,y]==numpy.inf if no directed path exists. Calculate which genes or terms are descendants of other genes or terms. Parameters ----------- descendants: list A list of genes and/or terms. Default: A list of all genes followed by a list of all terms, in the same order as `self.genes` and `self.terms`. ancestors: list A list of genes and/or terms. Default: Same as the ``descendants`` parameter. sparse : bool If True, return a scipy.sparse matrix. If False (default), return a NumPy array. Returns ------- d : np.ndarray or scipy.sparse.matrix A descendants-by-ancestors matrix. ``d[i,j]`` is 1 if term i is a descendant of term j, and 0 otherwise. Note that ``d[i,i]==1`` and ``d[root,i]==0``, for every i. # def get_leaves(self, terms_list, children_list=None): # """Returns terms in ``terms_list`` that are not ancestors of any term in # ``children_list``. # Parameters # ---------- # terms_list : list # children_list : list # If ``children_list`` is None, then select the terms in # <terms_list> that are not ancestors of any of the other # terms in <terms_list>. 
# """ # connectivity_matrix_nodiag = self.get_connectivity_matrix_nodiag() # terms_list = np.array(terms_list) # if children_list is None: # children_list = terms_list # else: # children_list = np.array(children_list) # return terms_list[~ np.any(connectivity_matrix_nodiag[children_list, :][:, terms_list], axis=0)] Propagate gene-term annotations through the ontology. As an example, consider an ontology with one gene ``g``, three terms ``t1, t2, t3`` and the following connections: :: t1-->t2 t2-->t3 g-->t1 g-->t2 In "forward" propagation, a new relation ``g-->t3`` is added. In "reverse" propagation, the relation "g-->t2" is deleted because it is an indirect relation inferred from "g-->t1" and "t1-->t2". Parameters ---------- direction : str The direction of propagation. Either 'forward' or 'reverse' inplace : bool If True, then modify the ontology. If False, then create and modify a copy. Returns ------- : ddot.Ontology.Ontology # This is needed to ensure that the pruning to a parent's # gene set can be based on the gene sets of its direct # children # # TODO: have this topological sorting be a part of the code below # graph = ont.to_igraph(include_genes=False, spanning_tree=False) # for c_idx in graph.topological_sorting(mode='in'): # child = graph.vs[c_idx]['name'] Transform genotypes to ontotypes. .. [1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Sharan, R. and <NAME>., 2016. "Translation of genotype to phenotype by a hierarchy of cell subsystems". *Cell Systems*, 2(2), pp.77-88. Parameters ---------- genotypes : list, np.ndarray, scipy.sparse.spmatrix, pd.DataFrame input_format : str If "gene_list", then ``genotypes`` is a list of genotypes, where genotype is itself a list of genes mutated. Each gene is assumed to have a mutation value of 1. If 'matrix', then ``genotypes`` is a genotype-by-gene matrix, where the value at position (i,j) represents the mutation value of gene j in genotype i. 
``genotypes`` can be a NumPy array, SciPy sparse matrix, or Pandas dataframe. output_format : str If 'sparse', then return a sparse matrix as a scipy.sparse.csr_matrix object. (default) If 'dataframe', then return a pandas.DataFrame object. If 'array', then return a numpy.ndarray object. matrix_columns : list represents a list of the genes that are represented by the columns of ``genotypes``. Only used when input_format is "matrix" and ``genotypes`` is a NumPy array or SciPy sparse matrix. Returns ------- : scipy.sparse.csr_matrix, pandas.DataFrame, numpy.ndarray genotype-by-term matrix, where the ordering of rows and terms is the same as ``genotypes`` and ``self.terms`` Returns a gene-by-term matrix stored as a scipy.sparse.coo_matrix Returns ------- : scipy.sparse.coo_matrix # Convert gene names to indices Summarize the Ontology's contents with respect to number of genes, terms, and connections. Returns -------- : str # node_attr_names = ', '.join(map(str, self.node_attr.columns)) # edge_attr_names = ', '.join(map(str, self.edge_attr.columns)) Upload an Ontology object to NDEx. The Ontology can be preformatted in several ways including 1. Set a name and description of the Ontology 2. Upload a supporting gene-gene subnetwork for every term in the Ontology 3. Propagate gene-term annotations 4. Layout the nodes. 5. Apply a visual style, e.g. specifying node and edge colors Parameters ---------- name : str Name of Ontology description : str Description of Ontology layout : str The name of the layout algorithm for laying out the Ontology as a graph. Node positions are stored in the node attributes 'x_pos' and 'y_pos'. If None, then do not perform a layout. style : ndex.networkn.NdexGraph The Cytoscape.js visual style on NDEx. Represented using CX and stored in an NdexGraph. network : pandas.Dataframe Dataframe describing gene-gene network from which to create subnetworks for every term. To be passed to Ontology.upload_subnets_ndex(). 
features : list of str Columns in the gene-gene network to upload. To be passed to Ontology.upload_subnets_ndex(). ndex_server : str URL of NDEx server ndex_user : str NDEx username ndex_pass : str NDEx password public : bool Whether to make the Ontology public on NDEx node_alias : str visibility : str Returns ------- : ndex.networkn.NdexGraph # Only upload subnets for the unique set of the original # terms Formats an Ontology object into a NetworkX object with extra node attributes that are accessed by the hierarchical viewer. Parameters ----------- name : str Name of Ontology, as would appear if uploaded to NDEx. description : str Description of Ontology, as would appear if uploaded to NDEx. term_2_uuid : dict A dictionary mapping a term to a NDEx UUID of a gene-gene subnetwork of genes in that term. the UUID will be stored in the node attribute 'ndex:internallink'. If uploaded to NDEx, then this attribute will provide a hyperlink to the gene-gene subnetwork when the term is clicked upon on the NDEx page for this ontology. This dictionary can be created using Ontology.upload_subnets_ndex(). Default: no dictionary. layout : str Layout the genes and terms in this Ontology. Stored in the node attributes 'x_pos' and 'y_pos'. If None, then do not perform a layout. Returns ------- : ndex.networkn.NdexGraph # Convert to NetworkX # Set extra attributes for passthrough visual styling # Set links to subnetworks supporting each term # # Change Original_Name to node indices # name_2_idx = {data['name'] : v for v, data in G.nodes(data=True)} # for v, data in G.nodes(data=True): # if 'Original_Name' in data and 'Hidden' in data and data['Hidden']==True: # data['Original_Name'] = name_2_idx[data['Original_Name']] Formats an Ontology object into a CX file format Parameters ----------- output : str Filename or file-like object to write CX file. If None, then CX is returned as a JSON object, but not written to a file. 
name : str Name of Ontology, as would appear if uploaded to NDEx. description : str Description of Ontology, as would appear if uploaded to NDEx. term_2_uuid : list A dictionary mapping a term to a NDEx UUID of a gene-gene subnetwork of genes in that term. the UUID will be stored in the node attribute 'ndex:internallink'. If uploaded to NDEx, then this attribute will provide a hyperlink to the gene-gene subnetwork when the term is clicked upon on the NDEx page for this ontology. This dictionary can be created using Ontology.upload_subnets_ndex(). Default: no dictionary. layout : str Layout the genes and terms in this Ontology. Stored in the node attributes 'x_pos' and 'y_pos'. If None, then do not perform a layout. Returns ------- : CX representation as a JSON-like dictionary # Convert to NdexGraph Writes an Ontology object in graphml format. Parameters ----------- output : str Filename or file-like object to write CX file. If None, then CX is returned as a JSON object, but not written to a file. layout : str Layout the genes and terms in this Ontology. Stored in the node attributes 'x_pos' and 'y_pos'. If None, then do not perform a layout. # Convert to NetworkX Force-directed layout on only the terms For each term in the ontology, upload a subnetwork of interactions between the genes in that term to NDEx. TODO: instead of specifying gene_columns, add another parameter use_index to specify that genes are the network's index Parameters ---------- network : pandas.Dataframe Dataframe describing network features : list of str Columns in network to upload name : str Prefix for the names of all subnetworks ndex_server : str URL of NDEx server ndex_user : str NDEx username ndex_pass : str NDEx password terms : list List of terms to upload a subnetwork. Default: upload for all terms. gene_columns : list Columns in network that represent the two genes. 
propagate : str The direction ('forward' or 'reverse') to propagate gene-term annotations up the hierarchy with Ontology.propagate(). If None, then don't propagate annotations. public : bool Whether to make networks public on NDEx node_attr : pandas.DataFrame # Filter dataframe for gene pairs within the ontology # Lexicographically sort gene1 and gene2 so that gene1 < gene2 # actually this may be redundant # Normalize features into z-scores # network_sq = ddot.utils.pivot_square(network, g1, g2, main_feature) # Calculate the min/max range of features # set an upper limit to the maximum number of edges uploaded to NDEx # (contributed by <NAME>) # Lexicographically sort gene1 and gene2 so that gene1 < gene2 # actually this may be redundant # New (Parent weight) # filter network if max_num_edges is greater then 0 # New: apply some minimum string force so nodes will not fly away # if spring_feature != None: # network_sub.loc[network_sub[spring_feature] < min_children_term_weights, spring_feature] = 0.5*min_children_term_weights # network_sub[spring_feature] = network_sub[spring_feature] ** spring_weight # Annotate the membership in children terms # for g in genes_in: # G_nx.node[g]['Group:'+c] = True # # If a gene belongs to multiple children, then place it where it is most similar # for g_i in (df.sum(1) > 0).nonzero(): # g = genes[g_i] # choices = df.loc[g, :].nonzero() # network_sq.loc[g, :].argmax() # for c in children: # G.set_network_attribute('Group:' + c, True) # New: calculate the score threshold of this subnetwork # G.set_network_attribute('Main Feature Default Cutoff', float(min_children_term_weights)) # New: only keep the biggest compoent in the network # # further remove degree == 1 nodes # if len(G.nodes()) > 6: # low_deg_nodes = [] # for v, deg in G.degree().items(): # if deg <= 1: # low_deg_nodes.append(v) # # while len(low_deg_nodes) != 0: # G.remove_nodes_from(low_deg_nodes) # low_deg_nodes = [] # for v, deg in G.degree().items(): # if deg <= 1: # 
low_deg_nodes.append(v) # New: compute a pre-layout to networks # G_cx = G.to_cx() # why converted back and forth # G = NdexGraph(G_cx) # layouts.apply_directed_flow_layout(G, node_width=50, weight=spring_feature) Compute the 'best' ancestor for every pair of terms. 'Best' is specified by a ranking of terms. For example, if terms are ranked by size, from smallest to largest, then the smallest common ancestor is calculated. Parameters ---------- node_order : list A list of terms, ordered by their rank with the 'best' term at the beginning. include_genes : bool Returns -------- ancestors : np.ndarray ancestors[a,b] = the best common ancestor of terms a and b, represented as a 0-based index of self.terms nodes : list List of the row and column names. Rows and columns are the same. # By default, sort from smallest to largest terms # Note: includes self as a child # For those descendants without a computed LCA yet, set their LCA to this term # Check symmetry Returns copy of graph with new edge attribute marking spanning tree Choose the parent with the highest valued priority Saves Ontology object with the Python pickle protocol. Loads an Ontology object from a pickled state.
| 1.910095
| 2
|
bar_chart.py
|
Evan-Yanagida/simple-bar-chart
| 0
|
6625872
|
import matplotlib.pyplot as plt
#See Line 39
#Calculate x_offset (in order to center text for number of steps walked each day to each bar)
def calc_x_offset(steps_walked_value):
    """Return the x offset used to place a bar's step-count label.

    Each extra digit in the rendered value pulls the (left-anchored) label
    a further 0.075 to the left of the 0.925 base offset, so wider numbers
    stay visually centered over their bar.
    """
    digit_count = len(str(steps_walked_value))
    return 0.925 - 0.075 * (digit_count - 1)
# INPUT DATA: x positions (1..7) and the step count recorded for each day.
days_of_the_week = [1,2,3,4,5,6,7]
steps_walked = [10, 1200, 1, 2379, 300, 2329, 6541]
# Keep a handle on the Axes so the per-bar labels below can be drawn with
# ax.text(); pyplot's implicit state machine is used for everything else.
fig, ax = plt.subplots()
x = days_of_the_week
y = steps_walked
plt.bar(x, y)
# TEXT: Hide y-axis label and ticks -- the exact counts are printed on the bars instead.
cur_axes = plt.gca()
cur_axes.axes.get_yaxis().set_visible(False)
# TEXT: Set x-axis label and ticks (weekday names, rotated vertically so they fit).
labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
plt.xlabel('Day of the Week', weight="semibold", size='medium')
plt.xticks(x, labels, rotation='vertical', fontname="monospace", size='x-small')
# TEXT: Set title for the bar chart.
plt.title('Sweatcoin Steps', weight="black", size="xx-large")
# TEXT: Print each day's step count just above its bar in blue bold font.
# calc_x_offset() shifts the text further left for numbers with more digits so
# the label stays visually centered. NOTE(review): the bars sit at x = 1..7
# while enumerate() yields i = 0..6, so the ~0.925 base offset also absorbs
# that +1 gap -- confirm the labels still line up if x positions ever change.
for i, v in enumerate(y):
    ax.text(i + calc_x_offset(steps_walked[i]), v + .25, str(steps_walked[i]), color="blue", fontweight="bold")
# SPACING: Set a bottom margin so the rotated x-axis labels fit inside the figure.
plt.subplots_adjust(bottom=0.25)
# Display the bar chart.
plt.show()
|
import matplotlib.pyplot as plt
#See Line 39
#Calculate x_offset (in order to center text for number of steps walked each day to each bar)
def calc_x_offset(steps_walked_value):
    """Return the x offset used to place a bar's step-count label.

    Each extra digit in the rendered value pulls the (left-anchored) label
    a further 0.075 to the left of the 0.925 base offset, so wider numbers
    stay visually centered over their bar.
    """
    digit_count = len(str(steps_walked_value))
    return 0.925 - 0.075 * (digit_count - 1)
# INPUT DATA: x positions (1..7) and the step count recorded for each day.
days_of_the_week = [1,2,3,4,5,6,7]
steps_walked = [10, 1200, 1, 2379, 300, 2329, 6541]
# Keep a handle on the Axes so the per-bar labels below can be drawn with
# ax.text(); pyplot's implicit state machine is used for everything else.
fig, ax = plt.subplots()
x = days_of_the_week
y = steps_walked
plt.bar(x, y)
# TEXT: Hide y-axis label and ticks -- the exact counts are printed on the bars instead.
cur_axes = plt.gca()
cur_axes.axes.get_yaxis().set_visible(False)
# TEXT: Set x-axis label and ticks (weekday names, rotated vertically so they fit).
labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
plt.xlabel('Day of the Week', weight="semibold", size='medium')
plt.xticks(x, labels, rotation='vertical', fontname="monospace", size='x-small')
# TEXT: Set title for the bar chart.
plt.title('Sweatcoin Steps', weight="black", size="xx-large")
# TEXT: Print each day's step count just above its bar in blue bold font.
# calc_x_offset() shifts the text further left for numbers with more digits so
# the label stays visually centered. NOTE(review): the bars sit at x = 1..7
# while enumerate() yields i = 0..6, so the ~0.925 base offset also absorbs
# that +1 gap -- confirm the labels still line up if x positions ever change.
for i, v in enumerate(y):
    ax.text(i + calc_x_offset(steps_walked[i]), v + .25, str(steps_walked[i]), color="blue", fontweight="bold")
# SPACING: Set a bottom margin so the rotated x-axis labels fit inside the figure.
plt.subplots_adjust(bottom=0.25)
# Display the bar chart.
plt.show()
|
en
| 0.780596
|
#See Line 39 #Calculate x_offset (in order to center text for number of steps walked each day to each bar) #INPUT DATA #See Line 39 #Allows the labeling of number of steps (blue bold text) to work #TEXT: Hide y-axis label and ticks #TEXT: Set x-axis label and ticks #TEXT: Set title for the bar chart #TEXT: Add the values for number of steps walked, to the ends of each bar, in blue bold font #Also centers these values to the bar adaptively regardless of variance in how many digits there are for number of steps #SPACING: Set a bottom margin to fit x-axis' text into the figure #Display the bar chart
| 3.690231
| 4
|
qiskit/test/base.py
|
AkashNarayanan/qiskit-terra
| 1,599
|
6625873
|
<reponame>AkashNarayanan/qiskit-terra<gh_stars>1000+
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=attribute-defined-outside-init,invalid-name,missing-type-doc
# pylint: disable=unused-argument,broad-except,bad-staticmethod-argument
# pylint: disable=inconsistent-return-statements
"""Base TestCases for the unit tests.
Implementors of unit tests for Terra are encouraged to subclass
``QiskitTestCase`` in order to take advantage of utility functions (for example,
the environment variables for customizing different options), and the
decorators in the ``decorators`` package.
"""
import inspect
import logging
import os
import sys
import warnings
import unittest
from unittest.util import safe_repr
try:
import fixtures
import testtools
HAS_FIXTURES = True
except ImportError:
HAS_FIXTURES = False
from .decorators import enforce_subclasses_call
from .utils import Path, setup_test_logging
__unittest = True # Allows shorter stack trace for .assertDictAlmostEqual
# If testtools is installed use that as a (mostly) drop in replacement for
# unittest's TestCase. This will enable the fixtures used for capturing stdout
# stderr, and pylogging to attach the output to stestr's result stream.
if HAS_FIXTURES:
    class BaseTestCase(testtools.TestCase):
        """Base test class backed by ``testtools`` (used when it is installed)."""
        # testtools maintains their own version of assert functions which mostly
        # behave as value adds to the std unittest assertion methods. However,
        # for assertEquals and assertRaises modern unittest has diverged from
        # the forks in testtools and offer more (or different) options that are
        # incompatible testtools versions. Just use the stdlib versions so that
        # our tests work as expected.
        assertRaises = unittest.TestCase.assertRaises
        assertEqual = unittest.TestCase.assertEqual
else:
    class BaseTestCase(unittest.TestCase):
        """Base test class falling back to the plain stdlib ``unittest.TestCase``."""
        pass
@enforce_subclasses_call(["setUp", "setUpClass", "tearDown", "tearDownClass"])
class BaseQiskitTestCase(BaseTestCase):
    """Additions for test cases for all Qiskit-family packages.
    The additions here are intended for all packages, not just Terra. Terra-specific logic should
    be in the Terra-specific classes."""
    def __init__(self, *args, **kwargs):
        """Initialize the test case and the double-call guard flags."""
        super().__init__(*args, **kwargs)
        # Name-mangled (per-class) flags, flipped by setUp/tearDown so a test
        # that calls either hook twice fails loudly instead of silently
        # re-running fixture code.
        self.__setup_called = False
        self.__teardown_called = False
    def setUp(self):
        """Run the shared set-up, raising if ``setUp`` was already called once."""
        super().setUp()
        if self.__setup_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.setUp was already called. Do not explicitly call "
                "setUp from your tests. In your own setUp, use super to call "
                "the base setUp." % (sys.modules[self.__class__.__module__].__file__,)
            )
        self.__setup_called = True
    def tearDown(self):
        """Run the shared tear-down, raising if ``tearDown`` was already called once."""
        super().tearDown()
        if self.__teardown_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.tearDown was already called. Do not explicitly call "
                "tearDown from your tests. In your own tearDown, use super to "
                "call the base tearDown." % (sys.modules[self.__class__.__module__].__file__,)
            )
        self.__teardown_called = True
    @staticmethod
    def _get_resource_path(filename, path=Path.TEST):
        """Get the absolute path to a resource.
        Args:
            filename (string): filename or relative path to the resource.
            path (Path): path used as relative to the filename.
        Returns:
            str: the absolute path to the resource.
        """
        return os.path.normpath(os.path.join(path.value, filename))
    def assertDictAlmostEqual(
        self, dict1, dict2, delta=None, msg=None, places=None, default_value=0
    ):
        """Assert two dictionaries with numeric values are almost equal.
        Fail if the two dictionaries are unequal as determined by
        comparing that the difference between values with the same key are
        not greater than delta (default 1e-8), or that difference rounded
        to the given number of decimal places is not zero. If a key in one
        dictionary is not in the other the default_value keyword argument
        will be used for the missing value (default 0). If the two objects
        compare equal then they will automatically compare almost equal.
        Args:
            dict1 (dict): a dictionary.
            dict2 (dict): a dictionary.
            delta (number): threshold for comparison (defaults to 1e-8).
            msg (str): return a custom message on failure.
            places (int): number of decimal places for comparison.
            default_value (number): default value for missing keys.
        Raises:
            TypeError: if the arguments are not valid (both `delta` and
                `places` are specified).
            AssertionError: if the dictionaries are not almost equal.
        """
        # Delegate to the module-level helper; a non-empty string means the
        # dictionaries differ and describes every mismatched key.
        error_msg = dicts_almost_equal(dict1, dict2, delta, places, default_value)
        if error_msg:
            msg = self._formatMessage(msg, error_msg)
            raise self.failureException(msg)
class QiskitTestCase(BaseQiskitTestCase):
    """Terra-specific extra functionality for test cases."""
    def tearDown(self):
        """Tear down the test and restore global provider state."""
        super().tearDown()
        # Reset the default providers, as in practice they act as a singleton
        # due to importing the instances from the top-level qiskit namespace.
        from qiskit.providers.basicaer import BasicAer
        BasicAer._backends = BasicAer._verify_backends()
    @classmethod
    def setUpClass(cls):
        """Set up per-class logging and the DeprecationWarning policy.

        DeprecationWarning is escalated to an error so newly-introduced
        deprecated usage fails the suite; known legacy emitters are then
        whitelisted back to the default (print-once) behaviour.
        """
        super().setUpClass()
        # Determines if the TestCase is using IBMQ credentials.
        cls.using_ibmq_credentials = False
        # Set logging to file and stdout if the LOG_LEVEL envar is set.
        cls.log = logging.getLogger(cls.__name__)
        if os.getenv("LOG_LEVEL"):
            filename = "%s.log" % os.path.splitext(inspect.getfile(cls))[0]
            setup_test_logging(cls.log, os.getenv("LOG_LEVEL"), filename)
        # Turn every DeprecationWarning raised during tests into an error...
        warnings.filterwarnings("error", category=DeprecationWarning)
        # ...except warnings emitted from these modules, which are known to
        # still use deprecated APIs.
        allow_DeprecationWarning_modules = [
            "test.python.pulse.test_parameters",
            "test.python.pulse.test_transforms",
            "test.python.circuit.test_gate_power",
            "test.python.pulse.test_builder",
            "test.python.pulse.test_block",
            "test.python.quantum_info.operators.symplectic.test_legacy_pauli",
            "qiskit.quantum_info.operators.pauli",
            "pybobyqa",
            "numba",
            "qiskit.utils.measurement_error_mitigation",
            "qiskit.circuit.library.standard_gates.x",
            "qiskit.pulse.schedule",
            "qiskit.pulse.instructions.instruction",
            "qiskit.pulse.instructions.play",
            "qiskit.pulse.library.parametric_pulses",
            "qiskit.quantum_info.operators.symplectic.pauli",
            "test.python.dagcircuit.test_dagcircuit",
            "importlib_metadata",
        ]
        for mod in allow_DeprecationWarning_modules:
            warnings.filterwarnings("default", category=DeprecationWarning, module=mod)
        # ...and except warnings whose message matches one of these patterns.
        allow_DeprecationWarning_message = [
            r".*LogNormalDistribution.*",
            r".*NormalDistribution.*",
            r".*UniformDistribution.*",
            r".*QuantumCircuit\.combine.*",
            r".*QuantumCircuit\.__add__.*",
            r".*QuantumCircuit\.__iadd__.*",
            r".*QuantumCircuit\.extend.*",
            r".*qiskit\.circuit\.library\.standard_gates\.ms import.*",
            r"elementwise comparison failed.*",
            r"The jsonschema validation included in qiskit-terra.*",
            r"The DerivativeBase.parameter_expression_grad method.*",
            r"Back-references to from Bit instances.*",
            r"The QuantumCircuit.u. method.*",
            r"The QuantumCircuit.cu.",
            r"The CXDirection pass has been deprecated",
            r"The pauli_basis function with PauliTable.*",
        ]
        for msg in allow_DeprecationWarning_message:
            warnings.filterwarnings("default", category=DeprecationWarning, message=msg)
class FullQiskitTestCase(QiskitTestCase):
    """Terra-specific further additions for test cases, if ``testtools`` is available.
    It is not normally safe to derive from this class by name; on import, Terra checks if the
    necessary packages are available, and binds this class to the name :obj:`~QiskitTestCase` if so.
    If you derive directly from it, you may try and instantiate the class without satisfying its
    dependencies."""
    def setUp(self):
        """Set up stream and logging capture fixtures for the test."""
        super().setUp()
        # Optionally divert stdout/stderr into fixtures so the runner can
        # attach the captured output to its result stream.
        if os.environ.get("QISKIT_TEST_CAPTURE_STREAMS"):
            stdout = self.useFixture(fixtures.StringStream("stdout")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout))
            stderr = self.useFixture(fixtures.StringStream("stderr")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr))
        # Always capture logging output (keeping the existing handlers alive).
        self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, level=None))
def dicts_almost_equal(dict1, dict2, delta=None, places=None, default_value=0):
    """Compare two dictionaries of numeric values for approximate equality.

    Values under the same key are considered close when their absolute
    difference is below ``delta`` (default ``1e-8``), or -- when ``places``
    is given instead -- when the difference rounds to zero at that many
    decimal places.  A key missing from one dictionary is treated as if it
    mapped to ``default_value``.  Dictionaries that compare equal with
    ``==`` are always almost equal.

    Args:
        dict1 (dict): first dictionary.
        dict2 (dict): second dictionary.
        delta (number): absolute-difference threshold (defaults to 1e-8).
        places (int): number of decimal places for comparison.
        default_value (number): value substituted for missing keys.

    Raises:
        TypeError: if both ``delta`` and ``places`` are specified.

    Returns:
        str: the empty string when the dictionaries are almost equal,
        otherwise a description of every differing key.
    """
    # Exactly-equal dictionaries are trivially almost equal.
    if dict1 == dict2:
        return ""
    # Resolve the comparison mode up front; delta and places are exclusive.
    if places is not None:
        if delta is not None:
            raise TypeError("specify delta or places not both")
        def close_enough(diff):
            return round(diff, places) == 0
        msg_suffix = " within %s places" % places
    else:
        delta = delta or 1e-8
        def close_enough(diff):
            return diff < delta
        msg_suffix = " within %s delta" % delta
    # Describe every key whose values are not close enough, substituting
    # default_value for keys absent from either side.
    mismatches = []
    for key in dict1.keys() | dict2.keys():
        first = dict1.get(key, default_value)
        second = dict2.get(key, default_value)
        if not close_enough(abs(first - second)):
            mismatches.append(f"({safe_repr(key)}: {safe_repr(first)} != {safe_repr(second)})")
    if not mismatches:
        return ""
    return ", ".join(mismatches) + msg_suffix
# Maintain naming backwards compatibility for downstream packages:
# ``BasicQiskitTestCase`` is the historical name for the plain test case.
BasicQiskitTestCase = QiskitTestCase
if HAS_FIXTURES:
    # Prefer the fixtures-enabled variant whenever testtools/fixtures exist.
    QiskitTestCase = FullQiskitTestCase
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=attribute-defined-outside-init,invalid-name,missing-type-doc
# pylint: disable=unused-argument,broad-except,bad-staticmethod-argument
# pylint: disable=inconsistent-return-statements
"""Base TestCases for the unit tests.
Implementors of unit tests for Terra are encouraged to subclass
``QiskitTestCase`` in order to take advantage of utility functions (for example,
the environment variables for customizing different options), and the
decorators in the ``decorators`` package.
"""
import inspect
import logging
import os
import sys
import warnings
import unittest
from unittest.util import safe_repr
try:
import fixtures
import testtools
HAS_FIXTURES = True
except ImportError:
HAS_FIXTURES = False
from .decorators import enforce_subclasses_call
from .utils import Path, setup_test_logging
__unittest = True # Allows shorter stack trace for .assertDictAlmostEqual
# If testtools is installed use that as a (mostly) drop in replacement for
# unittest's TestCase. This will enable the fixtures used for capturing stdout
# stderr, and pylogging to attach the output to stestr's result stream.
if HAS_FIXTURES:
    class BaseTestCase(testtools.TestCase):
        """Base test class backed by ``testtools`` (used when it is installed)."""
        # testtools maintains their own version of assert functions which mostly
        # behave as value adds to the std unittest assertion methods. However,
        # for assertEquals and assertRaises modern unittest has diverged from
        # the forks in testtools and offer more (or different) options that are
        # incompatible testtools versions. Just use the stdlib versions so that
        # our tests work as expected.
        assertRaises = unittest.TestCase.assertRaises
        assertEqual = unittest.TestCase.assertEqual
else:
    class BaseTestCase(unittest.TestCase):
        """Base test class falling back to the plain stdlib ``unittest.TestCase``."""
        pass
@enforce_subclasses_call(["setUp", "setUpClass", "tearDown", "tearDownClass"])
class BaseQiskitTestCase(BaseTestCase):
    """Additions for test cases for all Qiskit-family packages.
    The additions here are intended for all packages, not just Terra. Terra-specific logic should
    be in the Terra-specific classes."""
    def __init__(self, *args, **kwargs):
        """Initialize the test case and the double-call guard flags."""
        super().__init__(*args, **kwargs)
        # Name-mangled (per-class) flags, flipped by setUp/tearDown so a test
        # that calls either hook twice fails loudly instead of silently
        # re-running fixture code.
        self.__setup_called = False
        self.__teardown_called = False
    def setUp(self):
        """Run the shared set-up, raising if ``setUp`` was already called once."""
        super().setUp()
        if self.__setup_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.setUp was already called. Do not explicitly call "
                "setUp from your tests. In your own setUp, use super to call "
                "the base setUp." % (sys.modules[self.__class__.__module__].__file__,)
            )
        self.__setup_called = True
    def tearDown(self):
        """Run the shared tear-down, raising if ``tearDown`` was already called once."""
        super().tearDown()
        if self.__teardown_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.tearDown was already called. Do not explicitly call "
                "tearDown from your tests. In your own tearDown, use super to "
                "call the base tearDown." % (sys.modules[self.__class__.__module__].__file__,)
            )
        self.__teardown_called = True
    @staticmethod
    def _get_resource_path(filename, path=Path.TEST):
        """Get the absolute path to a resource.
        Args:
            filename (string): filename or relative path to the resource.
            path (Path): path used as relative to the filename.
        Returns:
            str: the absolute path to the resource.
        """
        return os.path.normpath(os.path.join(path.value, filename))
    def assertDictAlmostEqual(
        self, dict1, dict2, delta=None, msg=None, places=None, default_value=0
    ):
        """Assert two dictionaries with numeric values are almost equal.
        Fail if the two dictionaries are unequal as determined by
        comparing that the difference between values with the same key are
        not greater than delta (default 1e-8), or that difference rounded
        to the given number of decimal places is not zero. If a key in one
        dictionary is not in the other the default_value keyword argument
        will be used for the missing value (default 0). If the two objects
        compare equal then they will automatically compare almost equal.
        Args:
            dict1 (dict): a dictionary.
            dict2 (dict): a dictionary.
            delta (number): threshold for comparison (defaults to 1e-8).
            msg (str): return a custom message on failure.
            places (int): number of decimal places for comparison.
            default_value (number): default value for missing keys.
        Raises:
            TypeError: if the arguments are not valid (both `delta` and
                `places` are specified).
            AssertionError: if the dictionaries are not almost equal.
        """
        # Delegate to the module-level helper; a non-empty string means the
        # dictionaries differ and describes every mismatched key.
        error_msg = dicts_almost_equal(dict1, dict2, delta, places, default_value)
        if error_msg:
            msg = self._formatMessage(msg, error_msg)
            raise self.failureException(msg)
class QiskitTestCase(BaseQiskitTestCase):
    """Terra-specific extra functionality for test cases."""
    def tearDown(self):
        """Tear down the test and restore global provider state."""
        super().tearDown()
        # Reset the default providers, as in practice they act as a singleton
        # due to importing the instances from the top-level qiskit namespace.
        from qiskit.providers.basicaer import BasicAer
        BasicAer._backends = BasicAer._verify_backends()
    @classmethod
    def setUpClass(cls):
        """Set up per-class logging and the DeprecationWarning policy.

        DeprecationWarning is escalated to an error so newly-introduced
        deprecated usage fails the suite; known legacy emitters are then
        whitelisted back to the default (print-once) behaviour.
        """
        super().setUpClass()
        # Determines if the TestCase is using IBMQ credentials.
        cls.using_ibmq_credentials = False
        # Set logging to file and stdout if the LOG_LEVEL envar is set.
        cls.log = logging.getLogger(cls.__name__)
        if os.getenv("LOG_LEVEL"):
            filename = "%s.log" % os.path.splitext(inspect.getfile(cls))[0]
            setup_test_logging(cls.log, os.getenv("LOG_LEVEL"), filename)
        # Turn every DeprecationWarning raised during tests into an error...
        warnings.filterwarnings("error", category=DeprecationWarning)
        # ...except warnings emitted from these modules, which are known to
        # still use deprecated APIs.
        allow_DeprecationWarning_modules = [
            "test.python.pulse.test_parameters",
            "test.python.pulse.test_transforms",
            "test.python.circuit.test_gate_power",
            "test.python.pulse.test_builder",
            "test.python.pulse.test_block",
            "test.python.quantum_info.operators.symplectic.test_legacy_pauli",
            "qiskit.quantum_info.operators.pauli",
            "pybobyqa",
            "numba",
            "qiskit.utils.measurement_error_mitigation",
            "qiskit.circuit.library.standard_gates.x",
            "qiskit.pulse.schedule",
            "qiskit.pulse.instructions.instruction",
            "qiskit.pulse.instructions.play",
            "qiskit.pulse.library.parametric_pulses",
            "qiskit.quantum_info.operators.symplectic.pauli",
            "test.python.dagcircuit.test_dagcircuit",
            "importlib_metadata",
        ]
        for mod in allow_DeprecationWarning_modules:
            warnings.filterwarnings("default", category=DeprecationWarning, module=mod)
        # ...and except warnings whose message matches one of these patterns.
        allow_DeprecationWarning_message = [
            r".*LogNormalDistribution.*",
            r".*NormalDistribution.*",
            r".*UniformDistribution.*",
            r".*QuantumCircuit\.combine.*",
            r".*QuantumCircuit\.__add__.*",
            r".*QuantumCircuit\.__iadd__.*",
            r".*QuantumCircuit\.extend.*",
            r".*qiskit\.circuit\.library\.standard_gates\.ms import.*",
            r"elementwise comparison failed.*",
            r"The jsonschema validation included in qiskit-terra.*",
            r"The DerivativeBase.parameter_expression_grad method.*",
            r"Back-references to from Bit instances.*",
            r"The QuantumCircuit.u. method.*",
            r"The QuantumCircuit.cu.",
            r"The CXDirection pass has been deprecated",
            r"The pauli_basis function with PauliTable.*",
        ]
        for msg in allow_DeprecationWarning_message:
            warnings.filterwarnings("default", category=DeprecationWarning, message=msg)
class FullQiskitTestCase(QiskitTestCase):
    """Terra-specific further additions for test cases, if ``testtools`` is available.
    It is not normally safe to derive from this class by name; on import, Terra checks if the
    necessary packages are available, and binds this class to the name :obj:`~QiskitTestCase` if so.
    If you derive directly from it, you may try and instantiate the class without satisfying its
    dependencies."""
    def setUp(self):
        """Set up stream and logging capture fixtures for the test."""
        super().setUp()
        # Optionally divert stdout/stderr into fixtures so the runner can
        # attach the captured output to its result stream.
        if os.environ.get("QISKIT_TEST_CAPTURE_STREAMS"):
            stdout = self.useFixture(fixtures.StringStream("stdout")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout))
            stderr = self.useFixture(fixtures.StringStream("stderr")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr))
        # Always capture logging output (keeping the existing handlers alive).
        self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, level=None))
def dicts_almost_equal(dict1, dict2, delta=None, places=None, default_value=0):
    """Compare two dictionaries of numeric values for approximate equality.

    Values under the same key are considered close when their absolute
    difference is below ``delta`` (default ``1e-8``), or -- when ``places``
    is given instead -- when the difference rounds to zero at that many
    decimal places.  A key missing from one dictionary is treated as if it
    mapped to ``default_value``.  Dictionaries that compare equal with
    ``==`` are always almost equal.

    Args:
        dict1 (dict): first dictionary.
        dict2 (dict): second dictionary.
        delta (number): absolute-difference threshold (defaults to 1e-8).
        places (int): number of decimal places for comparison.
        default_value (number): value substituted for missing keys.

    Raises:
        TypeError: if both ``delta`` and ``places`` are specified.

    Returns:
        str: the empty string when the dictionaries are almost equal,
        otherwise a description of every differing key.
    """
    # Exactly-equal dictionaries are trivially almost equal.
    if dict1 == dict2:
        return ""
    # Resolve the comparison mode up front; delta and places are exclusive.
    if places is not None:
        if delta is not None:
            raise TypeError("specify delta or places not both")
        def close_enough(diff):
            return round(diff, places) == 0
        msg_suffix = " within %s places" % places
    else:
        delta = delta or 1e-8
        def close_enough(diff):
            return diff < delta
        msg_suffix = " within %s delta" % delta
    # Describe every key whose values are not close enough, substituting
    # default_value for keys absent from either side.
    mismatches = []
    for key in dict1.keys() | dict2.keys():
        first = dict1.get(key, default_value)
        second = dict2.get(key, default_value)
        if not close_enough(abs(first - second)):
            mismatches.append(f"({safe_repr(key)}: {safe_repr(first)} != {safe_repr(second)})")
    if not mismatches:
        return ""
    return ", ".join(mismatches) + msg_suffix
# Maintain naming backwards compatibility for downstream packages:
# ``BasicQiskitTestCase`` is the historical name for the plain test case.
BasicQiskitTestCase = QiskitTestCase
if HAS_FIXTURES:
    # Prefer the fixtures-enabled variant whenever testtools/fixtures exist.
    QiskitTestCase = FullQiskitTestCase
|
en
| 0.792995
|
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2018. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=attribute-defined-outside-init,invalid-name,missing-type-doc # pylint: disable=unused-argument,broad-except,bad-staticmethod-argument # pylint: disable=inconsistent-return-statements Base TestCases for the unit tests. Implementors of unit tests for Terra are encouraged to subclass ``QiskitTestCase`` in order to take advantage of utility functions (for example, the environment variables for customizing different options), and the decorators in the ``decorators`` package. # Allows shorter stack trace for .assertDictAlmostEqual # If testtools is installed use that as a (mostly) drop in replacement for # unittest's TestCase. This will enable the fixtures used for capturing stdout # stderr, and pylogging to attach the output to stestr's result stream. Base test class. # testtools maintains their own version of assert functions which mostly # behave as value adds to the std unittest assertion methods. However, # for assertEquals and assertRaises modern unittest has diverged from # the forks in testtools and offer more (or different) options that are # incompatible testtools versions. Just use the stdlib versions so that # our tests work as expected. Base test class. Additions for test cases for all Qiskit-family packages. The additions here are intended for all packages, not just Terra. Terra-specific logic should be in the Terra-specific classes. Get the absolute path to a resource. Args: filename (string): filename or relative path to the resource. 
path (Path): path used as relative to the filename. Returns: str: the absolute path to the resource. Assert two dictionaries with numeric values are almost equal. Fail if the two dictionaries are unequal as determined by comparing that the difference between values with the same key are not greater than delta (default 1e-8), or that difference rounded to the given number of decimal places is not zero. If a key in one dictionary is not in the other the default_value keyword argument will be used for the missing value (default 0). If the two objects compare equal then they will automatically compare almost equal. Args: dict1 (dict): a dictionary. dict2 (dict): a dictionary. delta (number): threshold for comparison (defaults to 1e-8). msg (str): return a custom message on failure. places (int): number of decimal places for comparison. default_value (number): default value for missing keys. Raises: TypeError: if the arguments are not valid (both `delta` and `places` are specified). AssertionError: if the dictionaries are not almost equal. Terra-specific extra functionality for test cases. # Reset the default providers, as in practice they acts as a singleton # due to importing the instances from the top-level qiskit namespace. # Determines if the TestCase is using IBMQ credentials. # Set logging to file and stdout if the LOG_LEVEL envar is set. Terra-specific further additions for test cases, if ``testtools`` is available. It is not normally safe to derive from this class by name; on import, Terra checks if the necessary packages are available, and binds this class to the name :obj:`~QiskitTestCase` if so. If you derive directly from it, you may try and instantiate the class without satisfying its dependencies. Test if two dictionaries with numeric values are almost equal. 
Fail if the two dictionaries are unequal as determined by comparing that the difference between values with the same key are not greater than delta (default 1e-8), or that difference rounded to the given number of decimal places is not zero. If a key in one dictionary is not in the other the default_value keyword argument will be used for the missing value (default 0). If the two objects compare equal then they will automatically compare almost equal. Args: dict1 (dict): a dictionary. dict2 (dict): a dictionary. delta (number): threshold for comparison (defaults to 1e-8). places (int): number of decimal places for comparison. default_value (number): default value for missing keys. Raises: TypeError: if the arguments are not valid (both `delta` and `places` are specified). Returns: String: Empty string if dictionaries are almost equal. A description of their difference if they are deemed not almost equal. compare value to delta, within places accuracy # Check arguments. # Compare all keys in both dicts, populating error_msg. # Maintain naming backwards compatibility for downstream packages.
| 1.92395
| 2
|
apetools/parameters/iperf_test_parameters.py
|
rsnakamura/oldape
| 0
|
6625874
|
<reponame>rsnakamura/oldape
# python standard library
from collections import namedtuple
class IperfTestParameters(namedtuple("IperfTestParameters", ["filename",
"iperf_parameters"])):
"""
IperfTestParameters add a filename to the iperf parameters.
"""
def __str__(self):
"""
:return: string representation of iperf_parameters
"""
return str(self.iperf_parameters)
# end class IperfTestParameters
|
# python standard library
from collections import namedtuple
class IperfTestParameters(namedtuple("IperfTestParameters", ["filename",
"iperf_parameters"])):
"""
IperfTestParameters add a filename to the iperf parameters.
"""
def __str__(self):
"""
:return: string representation of iperf_parameters
"""
return str(self.iperf_parameters)
# end class IperfTestParameters
|
en
| 0.307204
|
# python standard library IperfTestParameters add a filename to the iperf parameters. :return: string representation of iperf_parameters # end class IperfTestParameters
| 3.01742
| 3
|
src/saas/bkuser_shell/organization/urls.py
|
Canway-shiisa/bk-user
| 0
|
6625875
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import url
from .views.departments import DepartmentsApiViewSet, DepartmentViewSet
from .views.misc import SearchViewSet
from .views.profiles import LoginInfoViewSet, ProfilesApiViewSet, ProfilesViewSet
PVAR_DEPARTMENT_ID = r"(?P<department_id>[a-z0-9-]+)"
PVAR_PROFILE_ID = r"(?P<profile_id>[a-z0-9-]+)"
PVAR_CATEGORY_ID = r"(?P<category_id>[\d]+)"
PVAR_ANOTHER_DEPARTMENT_ID = r"(?P<another_department_id>[a-z0-9-]+)"
urlpatterns = [
######################
# Department related #
######################
url(
r"^api/v2/departments/%s/profiles/$" % PVAR_DEPARTMENT_ID,
DepartmentViewSet.as_view({"get": "get_profiles", "post": "add_profiles"}),
name="department.profiles",
),
url(r"^api/v2/departments/$", DepartmentViewSet.as_view({"get": "list", "post": "create"}), name="departments"),
url(
r"^api/v2/categories/%s/departments/search/$" % PVAR_CATEGORY_ID,
DepartmentViewSet.as_view({"get": "search_in_category"}),
name="departments.search_in_category",
),
url(
r"^api/v2/departments/%s/$" % PVAR_DEPARTMENT_ID,
DepartmentViewSet.as_view({"get": "retrieve", "delete": "delete", "patch": "update"}),
name="department.actions",
),
url(
r"^api/v2/departments/%s/switch_order/%s/$" % (PVAR_DEPARTMENT_ID, PVAR_ANOTHER_DEPARTMENT_ID),
DepartmentViewSet.as_view({"patch": "switch_order"}),
name="department.switch_order",
),
###################
# Profile related #
###################
url(
r"^api/v2/me/$",
LoginInfoViewSet.as_view({"get": "me"}),
name="profiles.login_info",
),
url(
r"^api/v2/profiles/$",
ProfilesViewSet.as_view({"post": "create"}),
name="profiles",
),
url(
r"^api/v2/categories/%s/profiles/$" % PVAR_CATEGORY_ID,
ProfilesViewSet.as_view({"get": "list"}),
name="profiles",
),
url(
r"^api/v2/profiles/%s/$" % PVAR_PROFILE_ID,
ProfilesViewSet.as_view({"get": "retrieve", "patch": "update"}),
name="profiles.actions",
),
url(
r"^api/v2/profiles/%s/restoration/$" % PVAR_PROFILE_ID,
ProfilesViewSet.as_view({"post": "restoration"}),
name="profiles.restoration",
),
url(
r"^api/v2/batch/profiles/$",
ProfilesViewSet.as_view({"patch": "multiple_update", "delete": "multiple_delete"}),
name="profiles.batch.actions",
),
##########
# search #
##########
url(
r"^api/v2/search/detail/$",
SearchViewSet.as_view({"get": "search"}),
name="profiles.login_info",
),
]
urlpatterns += [
url(
r"^api/v3/profiles",
ProfilesApiViewSet.as_view({"get": "get"}),
name="api.profiles",
),
url(
r"^api/v3/departments",
DepartmentsApiViewSet.as_view({"get": "get"}),
name="api.departments",
),
]
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import url
from .views.departments import DepartmentsApiViewSet, DepartmentViewSet
from .views.misc import SearchViewSet
from .views.profiles import LoginInfoViewSet, ProfilesApiViewSet, ProfilesViewSet
PVAR_DEPARTMENT_ID = r"(?P<department_id>[a-z0-9-]+)"
PVAR_PROFILE_ID = r"(?P<profile_id>[a-z0-9-]+)"
PVAR_CATEGORY_ID = r"(?P<category_id>[\d]+)"
PVAR_ANOTHER_DEPARTMENT_ID = r"(?P<another_department_id>[a-z0-9-]+)"
urlpatterns = [
######################
# Department related #
######################
url(
r"^api/v2/departments/%s/profiles/$" % PVAR_DEPARTMENT_ID,
DepartmentViewSet.as_view({"get": "get_profiles", "post": "add_profiles"}),
name="department.profiles",
),
url(r"^api/v2/departments/$", DepartmentViewSet.as_view({"get": "list", "post": "create"}), name="departments"),
url(
r"^api/v2/categories/%s/departments/search/$" % PVAR_CATEGORY_ID,
DepartmentViewSet.as_view({"get": "search_in_category"}),
name="departments.search_in_category",
),
url(
r"^api/v2/departments/%s/$" % PVAR_DEPARTMENT_ID,
DepartmentViewSet.as_view({"get": "retrieve", "delete": "delete", "patch": "update"}),
name="department.actions",
),
url(
r"^api/v2/departments/%s/switch_order/%s/$" % (PVAR_DEPARTMENT_ID, PVAR_ANOTHER_DEPARTMENT_ID),
DepartmentViewSet.as_view({"patch": "switch_order"}),
name="department.switch_order",
),
###################
# Profile related #
###################
url(
r"^api/v2/me/$",
LoginInfoViewSet.as_view({"get": "me"}),
name="profiles.login_info",
),
url(
r"^api/v2/profiles/$",
ProfilesViewSet.as_view({"post": "create"}),
name="profiles",
),
url(
r"^api/v2/categories/%s/profiles/$" % PVAR_CATEGORY_ID,
ProfilesViewSet.as_view({"get": "list"}),
name="profiles",
),
url(
r"^api/v2/profiles/%s/$" % PVAR_PROFILE_ID,
ProfilesViewSet.as_view({"get": "retrieve", "patch": "update"}),
name="profiles.actions",
),
url(
r"^api/v2/profiles/%s/restoration/$" % PVAR_PROFILE_ID,
ProfilesViewSet.as_view({"post": "restoration"}),
name="profiles.restoration",
),
url(
r"^api/v2/batch/profiles/$",
ProfilesViewSet.as_view({"patch": "multiple_update", "delete": "multiple_delete"}),
name="profiles.batch.actions",
),
##########
# search #
##########
url(
r"^api/v2/search/detail/$",
SearchViewSet.as_view({"get": "search"}),
name="profiles.login_info",
),
]
urlpatterns += [
url(
r"^api/v3/profiles",
ProfilesApiViewSet.as_view({"get": "get"}),
name="api.profiles",
),
url(
r"^api/v3/departments",
DepartmentsApiViewSet.as_view({"get": "get"}),
name="api.departments",
),
]
|
en
| 0.744854
|
# -*- coding: utf-8 -*- TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ###################### # Department related # ###################### ################### # Profile related # ################### ########## # search # ##########
| 1.668963
| 2
|
lib/python/treadmill/tests/cli_test.py
|
bretttegartms/treadmill
| 133
|
6625876
|
<reponame>bretttegartms/treadmill
"""Unit test for treadmill.cli.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import sys
import unittest
import click
import mock
import six
from treadmill import cli
from treadmill import context
from treadmill.formatter import tablefmt
def _lines(tbl):
"""Convert table to list of lines."""
return list(six.moves.map(str.strip, str(tbl).splitlines()))
class CliTest(unittest.TestCase):
"""These are Tests for teadmill.warm."""
def test_table(self):
"""Tests table output."""
schema = [('A', 'a', None),
('b', None, None),
('c', None, None)]
tbl = tablefmt.make_dict_to_table(schema)
list_tbl = tablefmt.make_list_to_table(schema)
self.assertEqual(
_lines(tbl({'a': 1, 'b': 2, 'c': [1, 2, 3]})),
['A : 1',
'b : 2',
'c : 1,2,3']
)
self.assertEqual(
_lines(list_tbl([{'a': 1, 'b': 2, 'c': [1, 2, 3]}])),
['A b c',
'1 2 1,2,3']
)
@mock.patch('click.echo', mock.Mock())
@mock.patch('sys.exit', mock.Mock())
def test_exceptions_wrapper(self):
"""Tests wrapping function with exceptions wrapper."""
class AExc(Exception):
"""Sample exception.
"""
class BExc(Exception):
"""Another exception.
"""
on_exceptions = cli.handle_exceptions([
(AExc, 'a'),
(BExc, 'b'),
])
@on_exceptions
def _raise_a():
"""Raise A exception."""
raise AExc()
@on_exceptions
def _raise_b():
"""Raise B exception."""
raise BExc()
_raise_a()
click.echo.assert_called_with('a', err=True)
sys.exit.assert_called_with(1)
click.echo.reset_mock()
sys.exit.reset_mock()
_raise_b()
click.echo.assert_called_with('b', err=True)
sys.exit.assert_called_with(1)
def test_combine(self):
"""Test combining lists.
"""
self.assertEqual(['a', 'b', 'c'], cli.combine(['a', 'b,c']))
self.assertEqual(None, cli.combine(['-']))
def test_handle_cell_opt(self):
"""Test parsing cell CLI option."""
param = collections.namedtuple('param', 'name')('cell')
ctx = collections.namedtuple('ctx', 'resilient_parsing')(False)
cli.handle_context_opt(ctx, param, 'foo')
self.assertEqual(context.GLOBAL.cell, 'foo')
def test_handle_fq_cell_opt(self):
"""Test parsing cell CLI option."""
param = collections.namedtuple('param', 'name')('cell')
ctx = collections.namedtuple('ctx', 'resilient_parsing')(False)
cli.handle_context_opt(ctx, param, 'foo.xx.com')
self.assertEqual(context.GLOBAL.cell, 'foo')
self.assertEqual(context.GLOBAL.dns_domain, 'xx.com')
if __name__ == '__main__':
unittest.main()
|
"""Unit test for treadmill.cli.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import sys
import unittest
import click
import mock
import six
from treadmill import cli
from treadmill import context
from treadmill.formatter import tablefmt
def _lines(tbl):
"""Convert table to list of lines."""
return list(six.moves.map(str.strip, str(tbl).splitlines()))
class CliTest(unittest.TestCase):
"""These are Tests for teadmill.warm."""
def test_table(self):
"""Tests table output."""
schema = [('A', 'a', None),
('b', None, None),
('c', None, None)]
tbl = tablefmt.make_dict_to_table(schema)
list_tbl = tablefmt.make_list_to_table(schema)
self.assertEqual(
_lines(tbl({'a': 1, 'b': 2, 'c': [1, 2, 3]})),
['A : 1',
'b : 2',
'c : 1,2,3']
)
self.assertEqual(
_lines(list_tbl([{'a': 1, 'b': 2, 'c': [1, 2, 3]}])),
['A b c',
'1 2 1,2,3']
)
@mock.patch('click.echo', mock.Mock())
@mock.patch('sys.exit', mock.Mock())
def test_exceptions_wrapper(self):
"""Tests wrapping function with exceptions wrapper."""
class AExc(Exception):
"""Sample exception.
"""
class BExc(Exception):
"""Another exception.
"""
on_exceptions = cli.handle_exceptions([
(AExc, 'a'),
(BExc, 'b'),
])
@on_exceptions
def _raise_a():
"""Raise A exception."""
raise AExc()
@on_exceptions
def _raise_b():
"""Raise B exception."""
raise BExc()
_raise_a()
click.echo.assert_called_with('a', err=True)
sys.exit.assert_called_with(1)
click.echo.reset_mock()
sys.exit.reset_mock()
_raise_b()
click.echo.assert_called_with('b', err=True)
sys.exit.assert_called_with(1)
def test_combine(self):
"""Test combining lists.
"""
self.assertEqual(['a', 'b', 'c'], cli.combine(['a', 'b,c']))
self.assertEqual(None, cli.combine(['-']))
def test_handle_cell_opt(self):
"""Test parsing cell CLI option."""
param = collections.namedtuple('param', 'name')('cell')
ctx = collections.namedtuple('ctx', 'resilient_parsing')(False)
cli.handle_context_opt(ctx, param, 'foo')
self.assertEqual(context.GLOBAL.cell, 'foo')
def test_handle_fq_cell_opt(self):
"""Test parsing cell CLI option."""
param = collections.namedtuple('param', 'name')('cell')
ctx = collections.namedtuple('ctx', 'resilient_parsing')(False)
cli.handle_context_opt(ctx, param, 'foo.xx.com')
self.assertEqual(context.GLOBAL.cell, 'foo')
self.assertEqual(context.GLOBAL.dns_domain, 'xx.com')
if __name__ == '__main__':
unittest.main()
|
en
| 0.585894
|
Unit test for treadmill.cli. Convert table to list of lines. These are Tests for teadmill.warm. Tests table output. Tests wrapping function with exceptions wrapper. Sample exception. Another exception. Raise A exception. Raise B exception. Test combining lists. Test parsing cell CLI option. Test parsing cell CLI option.
| 2.895557
| 3
|
check_model.py
|
Bharat-Runwal/path2vec
| 31
|
6625877
|
#!/usr/bin/python3
# coding: utf-8
import sys
import gensim
import logging
from itertools import combinations
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
modelfile = sys.argv[1]
if modelfile.endswith('.bin.gz'):
model = gensim.models.KeyedVectors.load_word2vec_format(modelfile, binary=True)
else:
model = gensim.models.KeyedVectors.load_word2vec_format(modelfile, binary=False)
word0 = 'measure.n.02'
word1 = 'fundamental_quantity.n.01'
word2 = 'person.n.01'
word3 = 'lover.n.03'
for pair in combinations([word0, word1, word2, word3], 2):
print(pair, model.similarity(pair[0], pair[1]))
for word in [word0, word1, word2, word3]:
print(word)
for i in model.most_similar(word):
print(i)
|
#!/usr/bin/python3
# coding: utf-8
import sys
import gensim
import logging
from itertools import combinations
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
modelfile = sys.argv[1]
if modelfile.endswith('.bin.gz'):
model = gensim.models.KeyedVectors.load_word2vec_format(modelfile, binary=True)
else:
model = gensim.models.KeyedVectors.load_word2vec_format(modelfile, binary=False)
word0 = 'measure.n.02'
word1 = 'fundamental_quantity.n.01'
word2 = 'person.n.01'
word3 = 'lover.n.03'
for pair in combinations([word0, word1, word2, word3], 2):
print(pair, model.similarity(pair[0], pair[1]))
for word in [word0, word1, word2, word3]:
print(word)
for i in model.most_similar(word):
print(i)
|
en
| 0.362849
|
#!/usr/bin/python3 # coding: utf-8
| 2.759651
| 3
|
bitglitter/write/render/videorender.py
|
eurekaX696/BitGlitter-Python
| 1
|
6625878
|
<filename>bitglitter/write/render/videorender.py
import cv2
import logging
from pathlib import Path
def render_video(stream_output_path, default_output_path, stream_name_file_output, working_directory, total_frames,
frames_per_second, stream_sha256, block_width, block_height, pixel_width, stream_name):
"""Taking in whichever arguments, it takes all of the rendered frames, and merges them into an .mp4 ."""
logging.info('Rendering video...')
if stream_output_path:
video_output_path = Path(stream_output_path)
else:
video_output_path = Path(default_output_path)
if stream_name_file_output:
video_name = stream_name
else:
video_name = stream_sha256
frame_size = (block_width * pixel_width, block_height * pixel_width)
save_path = f"{Path(video_output_path / video_name)}.mp4"
output = cv2.VideoWriter(str(save_path), cv2.VideoWriter.fourcc(*'mp4v'), frames_per_second, frame_size)
for frame in range(total_frames):
logging.info(f'Rendering video frame {frame + 1} of {total_frames}... '
f'({round((((frame + 1) / total_frames) * 100), 2):.2f} %)')
image = cv2.imread(str(Path(working_directory / f'{frame + 1}.png')))
output.write(image)
output.release()
logging.info('Rendering video complete.')
logging.info(f'Video save path: {save_path}')
|
<filename>bitglitter/write/render/videorender.py
import cv2
import logging
from pathlib import Path
def render_video(stream_output_path, default_output_path, stream_name_file_output, working_directory, total_frames,
frames_per_second, stream_sha256, block_width, block_height, pixel_width, stream_name):
"""Taking in whichever arguments, it takes all of the rendered frames, and merges them into an .mp4 ."""
logging.info('Rendering video...')
if stream_output_path:
video_output_path = Path(stream_output_path)
else:
video_output_path = Path(default_output_path)
if stream_name_file_output:
video_name = stream_name
else:
video_name = stream_sha256
frame_size = (block_width * pixel_width, block_height * pixel_width)
save_path = f"{Path(video_output_path / video_name)}.mp4"
output = cv2.VideoWriter(str(save_path), cv2.VideoWriter.fourcc(*'mp4v'), frames_per_second, frame_size)
for frame in range(total_frames):
logging.info(f'Rendering video frame {frame + 1} of {total_frames}... '
f'({round((((frame + 1) / total_frames) * 100), 2):.2f} %)')
image = cv2.imread(str(Path(working_directory / f'{frame + 1}.png')))
output.write(image)
output.release()
logging.info('Rendering video complete.')
logging.info(f'Video save path: {save_path}')
|
en
| 0.86544
|
Taking in whichever arguments, it takes all of the rendered frames, and merges them into an .mp4 .
| 3.040292
| 3
|
DataInsights/src/prose/__init__.py
|
vincentpun/ConformanceConstraintsReproducibility
| 0
|
6625879
|
<gh_stars>0
# Copyright (c) Microsoft Corporation. All rights reserved.
__import__("pkg_resources").declare_namespace(__name__)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
__import__("pkg_resources").declare_namespace(__name__)
|
en
| 0.865191
|
# Copyright (c) Microsoft Corporation. All rights reserved.
| 1.148173
| 1
|
2056/slu.py
|
matrixjoeq/timus_solutions
| 0
|
6625880
|
<filename>2056/slu.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
2056. Scholarship
Time limit: 1.0 second
Memory limit: 64 MB
[Description]
At last the first term at the University came to its finish. Android Vasya has
already passed all the exams and wants to know if he gets a scholarship. There
is the following practice of giving scholarship to students at the University:
if a student has got satisfactory marks, the scholarship is not given,
if a student has passed through the examination period with only excellent
marks, he gets a personal scholarship,
if a student doesn’t get a personal scholarship and his average mark is not
less than 4.5, he gets a high scholarship,
if a student gets neither high nor personal scholarship and doesn’t have
satisfactory marks, he gets a common scholarship.
A satisfactory mark corresponds to value 3, a good mark corresponds to value 4,
and an excellent mark corresponds to value 5. An average mark for a student is
the average value of all the marks this student got in his exams. Help Vasya
find out which scholarship he gets.
[Input]
The first line contains an integer n that is the number of exams (1 ≤ n ≤ 10).
In the i-th of the next n lines there is an integer mi that is value of Vasya’s
mark in i-th exam (3 ≤ mi ≤ 5).
[Output]
If Vasya doesn’t get any scholarship output “None”. If he gets a common scholar-
ship output “Common”, if he gets a high scholarship output “High”, if he gets a
personal one output “Named”.
'''
import sys;
import math;
def calc():
n = int(sys.stdin.readline())
scholarship = 'None'
satisfactory = False
personal = True
total = 0
for i in range(n):
score = int(sys.stdin.readline())
if (score != 5):
personal = False
if (score <= 3):
satisfactory = True
total = total + score
if (not satisfactory):
if (personal):
scholarship = 'Named'
else:
average = float(total) / n
if (average >= 4.5):
scholarship = 'High'
else:
scholarship = 'Common'
print scholarship
if __name__ == '__main__':
calc()
|
<filename>2056/slu.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
2056. Scholarship
Time limit: 1.0 second
Memory limit: 64 MB
[Description]
At last the first term at the University came to its finish. Android Vasya has
already passed all the exams and wants to know if he gets a scholarship. There
is the following practice of giving scholarship to students at the University:
if a student has got satisfactory marks, the scholarship is not given,
if a student has passed through the examination period with only excellent
marks, he gets a personal scholarship,
if a student doesn’t get a personal scholarship and his average mark is not
less than 4.5, he gets a high scholarship,
if a student gets neither high nor personal scholarship and doesn’t have
satisfactory marks, he gets a common scholarship.
A satisfactory mark corresponds to value 3, a good mark corresponds to value 4,
and an excellent mark corresponds to value 5. An average mark for a student is
the average value of all the marks this student got in his exams. Help Vasya
find out which scholarship he gets.
[Input]
The first line contains an integer n that is the number of exams (1 ≤ n ≤ 10).
In the i-th of the next n lines there is an integer mi that is value of Vasya’s
mark in i-th exam (3 ≤ mi ≤ 5).
[Output]
If Vasya doesn’t get any scholarship output “None”. If he gets a common scholar-
ship output “Common”, if he gets a high scholarship output “High”, if he gets a
personal one output “Named”.
'''
import sys;
import math;
def calc():
n = int(sys.stdin.readline())
scholarship = 'None'
satisfactory = False
personal = True
total = 0
for i in range(n):
score = int(sys.stdin.readline())
if (score != 5):
personal = False
if (score <= 3):
satisfactory = True
total = total + score
if (not satisfactory):
if (personal):
scholarship = 'Named'
else:
average = float(total) / n
if (average >= 4.5):
scholarship = 'High'
else:
scholarship = 'Common'
print scholarship
if __name__ == '__main__':
calc()
|
en
| 0.926159
|
#!/usr/bin/python # -*- coding: utf-8 -*- 2056. Scholarship Time limit: 1.0 second Memory limit: 64 MB [Description] At last the first term at the University came to its finish. Android Vasya has already passed all the exams and wants to know if he gets a scholarship. There is the following practice of giving scholarship to students at the University: if a student has got satisfactory marks, the scholarship is not given, if a student has passed through the examination period with only excellent marks, he gets a personal scholarship, if a student doesn’t get a personal scholarship and his average mark is not less than 4.5, he gets a high scholarship, if a student gets neither high nor personal scholarship and doesn’t have satisfactory marks, he gets a common scholarship. A satisfactory mark corresponds to value 3, a good mark corresponds to value 4, and an excellent mark corresponds to value 5. An average mark for a student is the average value of all the marks this student got in his exams. Help Vasya find out which scholarship he gets. [Input] The first line contains an integer n that is the number of exams (1 ≤ n ≤ 10). In the i-th of the next n lines there is an integer mi that is value of Vasya’s mark in i-th exam (3 ≤ mi ≤ 5). [Output] If Vasya doesn’t get any scholarship output “None”. If he gets a common scholar- ship output “Common”, if he gets a high scholarship output “High”, if he gets a personal one output “Named”.
| 3.478914
| 3
|
classy_mermaid/tree/graphs.py
|
e-lo/classy-mermaid
| 0
|
6625881
|
<filename>classy_mermaid/tree/graphs.py
from networkx import DiGraph
from .module_tree import ModuleTree
def tree_to_graph(tree: ModuleTree) -> DiGraph:
"""
Convert a tree to a graph.
"""
graph = DiGraph()
for node_id, node in tree.nodes.items():
graph.add_node(node_id)
for child_id in node.children:
graph.add_edge(node_id, child_id)
for parent_id in node.parents:
graph.add_edge(parent_id, node_id)
return graph
|
<filename>classy_mermaid/tree/graphs.py
from networkx import DiGraph
from .module_tree import ModuleTree
def tree_to_graph(tree: ModuleTree) -> DiGraph:
"""
Convert a tree to a graph.
"""
graph = DiGraph()
for node_id, node in tree.nodes.items():
graph.add_node(node_id)
for child_id in node.children:
graph.add_edge(node_id, child_id)
for parent_id in node.parents:
graph.add_edge(parent_id, node_id)
return graph
|
en
| 0.890172
|
Convert a tree to a graph.
| 3.349045
| 3
|
demo/validate_peer_interface.py
|
111pontes/xr-um
| 1
|
6625882
|
<reponame>111pontes/xr-um
#!/usr/bin/env python3
#
# Copyright 2019 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Validate peer interface operation.
usage: validate_peer_interface.py [-h] [-v] node name
positional arguments:
node node streaming interface status
name interface name
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
import kafka
import json
import time
import argparse
import logging
KAFKA_TOPIC = 'pipeline'
KAFKA_BOOTSTRAP_SERVER = 'localhost:9092'
STATE_UP = "im-state-up"
TIMEOUT = 30
def validate_peer_interface(kafka_consumer, node, name,
state=STATE_UP,
timeout=TIMEOUT):
"""Validate interface state."""
telemetry_encoding_path = "Cisco-IOS-XR-pfi-im-cmd-oper:interfaces/interface-briefs/interface-brief"
start_time = time.time()
for kafka_msg in kafka_consumer:
msg = json.loads(kafka_msg.value.decode('utf-8'))
if (msg["Telemetry"]["node_id_str"] == node and
msg["Telemetry"]["encoding_path"] == telemetry_encoding_path
and "Rows" in msg):
for row in msg["Rows"]:
# return true if intf in expected oper/admin state
if ("interface-name" in row["Keys"] and
row["Keys"]["interface-name"] == name and
"state" in row["Content"] and
"line-state" in row["Content"] and
row["Content"]["state"] == state and
row["Content"]["line-state"] == state
):
return True
if time.time() - start_time > timeout:
break
return False
if __name__ == "__main__":
"""Execute main program."""
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("node",
help="node router streaming interface status")
parser.add_argument("name",
help="interface name")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("kafka")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create kafka consumer to pipeline topic
kafka_consumer = kafka.KafkaConsumer(KAFKA_TOPIC,
bootstrap_servers=KAFKA_BOOTSTRAP_SERVER,
consumer_timeout_ms=TIMEOUT*1000)
print(validate_peer_interface(kafka_consumer, args.node, args.name))
exit()
# End of script
|
#!/usr/bin/env python3
#
# Copyright 2019 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Validate peer interface operation.
usage: validate_peer_interface.py [-h] [-v] node name
positional arguments:
node node streaming interface status
name interface name
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
import kafka
import json
import time
import argparse
import logging
KAFKA_TOPIC = 'pipeline'
KAFKA_BOOTSTRAP_SERVER = 'localhost:9092'
STATE_UP = "im-state-up"
TIMEOUT = 30
def validate_peer_interface(kafka_consumer, node, name,
state=STATE_UP,
timeout=TIMEOUT):
"""Validate interface state."""
telemetry_encoding_path = "Cisco-IOS-XR-pfi-im-cmd-oper:interfaces/interface-briefs/interface-brief"
start_time = time.time()
for kafka_msg in kafka_consumer:
msg = json.loads(kafka_msg.value.decode('utf-8'))
if (msg["Telemetry"]["node_id_str"] == node and
msg["Telemetry"]["encoding_path"] == telemetry_encoding_path
and "Rows" in msg):
for row in msg["Rows"]:
# return true if intf in expected oper/admin state
if ("interface-name" in row["Keys"] and
row["Keys"]["interface-name"] == name and
"state" in row["Content"] and
"line-state" in row["Content"] and
row["Content"]["state"] == state and
row["Content"]["line-state"] == state
):
return True
if time.time() - start_time > timeout:
break
return False
if __name__ == "__main__":
"""Execute main program."""
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("node",
help="node router streaming interface status")
parser.add_argument("name",
help="interface name")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("kafka")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create kafka consumer to pipeline topic
kafka_consumer = kafka.KafkaConsumer(KAFKA_TOPIC,
bootstrap_servers=KAFKA_BOOTSTRAP_SERVER,
consumer_timeout_ms=TIMEOUT*1000)
print(validate_peer_interface(kafka_consumer, args.node, args.name))
exit()
# End of script
|
en
| 0.638909
|
#!/usr/bin/env python3 # # Copyright 2019 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Validate peer interface operation. usage: validate_peer_interface.py [-h] [-v] node name positional arguments: node node streaming interface status name interface name optional arguments: -h, --help show this help message and exit -v, --verbose print debugging messages Validate interface state. # return true if intf in expected oper/admin state Execute main program. # log debug messages if verbose argument specified # create kafka consumer to pipeline topic # End of script
| 2.0186
| 2
|
code/NumpyIntro.py
|
ju1-eu/pyAnfaenger
| 0
|
6625883
|
<reponame>ju1-eu/pyAnfaenger
import numpy as np

# Convert a plain Python list of marks into a NumPy array
noten_list = [100, 89, 44, 78, 45, 24, 18]
noten_np_array = np.array(noten_list, dtype=np.int8)

# Largest and smallest mark
noten_max = np.max(noten_np_array)
noten_min = np.min(noten_np_array)

# argmax/argmin return the *index* of the extreme value:
#  index:   0    1   2   3   4   5   6
#  value: [100, 89, 44, 78, 45, 24, 18]
noten_arg_max = np.argmax(noten_np_array)
noten_arg_min = np.argmin(noten_np_array)

# Arithmetic mean and median
noten_mean = np.mean(noten_np_array)
noten_median = np.median(noten_np_array)
|
import numpy as np

# Convert a plain Python list of marks into a NumPy array
noten_list = [100, 89, 44, 78, 45, 24, 18]
noten_np_array = np.array(noten_list, dtype=np.int8)

# Largest and smallest mark
noten_max = np.max(noten_np_array)
noten_min = np.min(noten_np_array)

# argmax/argmin return the *index* of the extreme value:
#  index:   0    1   2   3   4   5   6
#  value: [100, 89, 44, 78, 45, 24, 18]
noten_arg_max = np.argmax(noten_np_array)
noten_arg_min = np.argmin(noten_np_array)

# Arithmetic mean and median
noten_mean = np.mean(noten_np_array)
noten_median = np.median(noten_np_array)
|
de
| 0.8524
|
# liste in array wandeln mit numpy # max und min # argmax git den index zurück # 0 1 2 3 4 5 6 # [100, 89, 44, 78, 45, 24, 18] # mittelwert und median
| 2.973754
| 3
|
networking_vpp/agent/server.py
|
Ebrink/networking-vpp
| 0
|
6625884
|
<filename>networking_vpp/agent/server.py
# Copyright (c) 2017 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Note that it does *NOT* at this point have a persistent database, so
# restarting this process will make net-vpp forget about every port it's
# learned, which will not do your system much good (the data is in the
# global 'backends' and 'ports' objects). This is for simplicity of
# demonstration; we have a second codebase already defined that is
# written to OpenStack endpoint principles and includes its ORM, so
# that work was not repeated here where the aim was to get the APIs
# worked out. The two codebases will merge in the future.
from __future__ import absolute_import
# eventlet must be monkey patched early or we confuse urllib3.
import eventlet
eventlet.monkey_patch()
import binascii
from collections import defaultdict
from collections import namedtuple
import etcd
import eventlet.semaphore
from ipaddress import ip_address, ip_interface, ip_network
import os
import re
import shlex
import sys
import time
from networking_vpp.agent import gpe
from networking_vpp.agent import network_interface
from networking_vpp.agent import vpp
from networking_vpp import config_opts
from networking_vpp import constants as nvpp_const
from networking_vpp import etcdutils
from networking_vpp.ext_manager import ExtensionManager
from networking_vpp.extension import VPPAgentExtensionBase
from networking_vpp.mech_vpp import SecurityGroup
from networking_vpp.mech_vpp import SecurityGroupRule
from networking_vpp.utils import device_monitor_async
from networking_vpp.utils import file_monitor
from networking_vpp import version
import neutron_lib.constants as n_const
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
import neutron.conf.agent.securitygroups_rpc
import neutron.conf.plugins.ml2.config
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from oslo_serialization import jsonutils
from oslo_utils import netutils
from typing import cast, Callable, Dict, TypeVar, Union, Optional, Set # noqa
TYPE_GPE = nvpp_const.TYPE_GPE
LOG = logging.getLogger(__name__)
# A model of a bi-directional VPP ACL corresponding to a secgroup
VppAcl = namedtuple('VppAcl', ['in_idx', 'out_idx'])
# TODO(najoy) Expose the below as a config option
# Enable stateful reflexive ACLs in VPP which adds automatic reverse rules
# When False, reverse rules are added by the vpp-agent and
# VPP does not maintain any session states
reflexive_acls = True
# We use eventlet for everything but threads. Here, we need an eventlet-based
# locking mechanism, so we call out eventlet specifically rather than using
# threading.Semaphore.
#
# Our own, strictly eventlet, locking:
_semaphores: Dict[str, eventlet.semaphore.Semaphore] = \
defaultdict(eventlet.semaphore.Semaphore)
def get_root_helper(conf) -> str:
    """Return the root-helper command configured for privilege separation."""
    helper = conf.AGENT.root_helper
    return helper
def setup_privsep() -> None:
    """Initialise oslo.privsep, running helpers via the configured root helper."""
    helper_cmd = shlex.split(get_root_helper(cfg.CONF))
    priv_context.init(root_helper=helper_cmd)
CV = TypeVar('CV', bound=Callable)


def eventlet_lock(name: str) -> Callable[[CV], CV]:
    """Decorator: serialise calls to the wrapped function under a named
    eventlet semaphore.

    All functions decorated with the same *name* share the one semaphore
    held in the module-level ``_semaphores`` map, so they are mutually
    exclusive with respect to each other.  This guards eventlet
    greenthreads only, not OS threads.

    :param name: key identifying the shared semaphore
    :return: a decorator that wraps a function with the lock
    """
    from functools import wraps

    sema = _semaphores[name]

    def eventlet_lock_decorator(func: CV) -> CV:
        # wraps() preserves __name__/__doc__ on the wrapper so logging,
        # debugging and introspection still see the original function.
        @wraps(func)
        def func_wrap(*args, **kwargs):
            LOG.debug("Acquiring lock '%s' before executing %s" %
                      (name, func.__name__))
            with sema:
                LOG.debug("Acquired lock '%s' before executing %s" %
                          (name, func.__name__))
                return func(*args, **kwargs)
        return cast(CV, func_wrap)
    return eventlet_lock_decorator
######################################################################
# This mirrors functionality in Neutron so that we're creating a name
# that Neutron can find for its agents.
# Reusing Neutron's TAP device prefix means the devices we create are
# discoverable by standard Neutron tooling.
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
######################################################################
# TODO(ijw): should be pulled from Neutron or Nova - this naming
# scheme is common between both
# Number of leading port-UUID characters embedded in device names
TAP_UUID_LEN = 11


def get_tap_name(uuid):
    # type: (str) -> str
    """Kernel-side TAP device name for a port (Neutron/Nova convention)."""
    return n_const.TAP_DEVICE_PREFIX + uuid[:TAP_UUID_LEN]


def get_bridge_name(uuid):
    # type: (str) -> str
    """Linux bridge name joining the VPP-side tap and the external tap."""
    return 'br-' + uuid[:TAP_UUID_LEN]


# This is our internal name and the other end neither knows or cares about
# it, only the bridge we put it in
def get_vpptap_name(uuid):
    # type: (str) -> str
    """Name of the VPP-side tap device; purely internal to this agent."""
    return 'vpp' + uuid[:TAP_UUID_LEN]
def default_if_none(x, default):
    """Return *x*, unless it is None, in which case return *default*."""
    if x is None:
        return default
    return x
######################################################################
def VPP_TAG(tag):
    """Prefix *tag* with this agent's namespace so our VPP interface
    tags are distinguishable from anyone else's."""
    return 'net-vpp.' + tag


# Interface tagging naming scheme :
# tap and vhost interfaces: port:<uuid>
# Uplink Connectivity: uplink:<net_type>.<seg_id>
# MAX_PHYSNET_LENGTH + the tag format must be <= the 64 bytes of a VPP tag
MAX_PHYSNET_LENGTH = 32
TAG_PHYSNET_IF_PREFIX = VPP_TAG('physnet:')
TAG_UPLINK_PREFIX = VPP_TAG('uplink:')
TAG_L2IFACE_PREFIX = VPP_TAG('port:')


def get_vhostuser_name(uuid):
    """Filesystem path of the vhost-user socket for a port."""
    return os.path.join(cfg.CONF.ml2_vpp.vhost_user_dir, uuid)


def physnet_if_tag(physnet_name):
    """Tag marking an interface as the uplink device of a physnet."""
    return TAG_PHYSNET_IF_PREFIX + physnet_name


def decode_physnet_if_tag(tag):
    """Return the physnet name from a physnet tag, or None if not one."""
    if tag is None:
        return None
    match = re.match('^' + TAG_PHYSNET_IF_PREFIX + '([^.]+)$', tag)
    if match is None:
        return None
    return match.group(1)


def uplink_tag(physnet, net_type, seg_id):
    """Tag marking an interface as a network's uplink."""
    return '%s%s.%s.%s' % (TAG_UPLINK_PREFIX, physnet, net_type, seg_id)


def decode_uplink_tag(tag):
    """Spot an uplink interface tag.

    Return (physnet, net_type, seg_id) or None if not an uplink tag.
    """
    if tag is None:
        return None  # not tagged
    match = re.match(
        '^' + TAG_UPLINK_PREFIX + r'([^.]+)\.([^.]+)\.([^.]+)$', tag)
    if match is None:
        return None
    return (match.group(1), match.group(2), match.group(3))


def port_tag(port_uuid):
    """Tag marking an interface as bound to the given Neutron port."""
    return TAG_L2IFACE_PREFIX + str(port_uuid)


def decode_port_tag(tag):
    """Spot a port interface tag.

    Return the port UUID or None if not a port interface tag.
    """
    if tag is None:
        return None  # not tagged
    match = re.match(
        '^' + TAG_L2IFACE_PREFIX + '(' + n_const.UUID_PATTERN + ')$', tag)
    if match is None:
        return None
    return match.group(1)
######################################################################
# Security group tag formats used to tag ACLs in VPP for
# re-identification on restart
# When leaving VPP and entering the VM
# When leaving VPP and entering the VM (VM ingress)
VPP_TO_VM = 1
# When leaving the VM and entering VPP (VM egress)
VM_TO_VPP = 0
VPP_TO_VM_MARK = 'from-vpp'
VM_TO_VPP_MARK = 'to-vpp'


def VPP_TO_VM_TAG(tag):
    """Suffix *tag* for the VPP-to-VM (VM ingress) direction."""
    return '.'.join((tag, VPP_TO_VM_MARK))


def VM_TO_VPP_TAG(tag):
    """Suffix *tag* for the VM-to-VPP (VM egress) direction."""
    return '.'.join((tag, VM_TO_VPP_MARK))


def DIRECTION_TAG(tag, is_vm_ingress):
    """Directionalise *tag* with the appropriate suffix."""
    if is_vm_ingress:
        return VPP_TO_VM_TAG(tag)
    return VM_TO_VPP_TAG(tag)
COMMON_SPOOF_TAG = VPP_TAG('common_spoof')
COMMON_SPOOF_VPP_TO_VM_TAG = VPP_TO_VM_TAG(COMMON_SPOOF_TAG)
COMMON_SPOOF_VM_TO_VPP_TAG = VM_TO_VPP_TAG(COMMON_SPOOF_TAG)


def common_spoof_tag(is_vm_ingress):
    """Tag of the shared anti-spoof ACL for the given direction."""
    return (COMMON_SPOOF_VPP_TO_VM_TAG if is_vm_ingress
            else COMMON_SPOOF_VM_TO_VPP_TAG)


def decode_common_spoof_tag(tag):
    """Work out if this tag is one of our common spoof filter tags.

    Return 1 for the VM-ingress tag, 0 for the VM-egress tag, else None.
    """
    if tag == COMMON_SPOOF_VPP_TO_VM_TAG:
        return 1
    if tag == COMMON_SPOOF_VM_TO_VPP_TAG:
        return 0
    return None
SECGROUP_TAG = VPP_TAG('secgroup:')


def secgroup_tag(secgroup_id, is_vm_ingress):
    """Directional ACL tag for a security group."""
    return DIRECTION_TAG(SECGROUP_TAG + secgroup_id, is_vm_ingress)


def decode_secgroup_tag(tag):
    """Decode a security-group ACL tag.

    Return (secgroup_id, is_vm_ingress), or (None, None) if *tag* is
    not a secgroup tag.
    """
    match = re.match(
        '^' + SECGROUP_TAG + '(' + n_const.UUID_PATTERN + r')\.(.*)$', tag)
    if match is None:
        return None, None
    return match.group(1), match.group(2) == VPP_TO_VM_MARK
class UnsupportedInterfaceException(Exception):
    """Raised when ML2 requests a binding type this agent cannot provide."""
class VPPForwarder(object):
"""Convert agent requirements into VPP calls
This class has no interaction with etcd; other classes have no
interaction with VPP. The job of this class is to turn the
demands of etcd's data into VPP constructs.
"""
    def __init__(self,
                 physnets,  # physnet_name: interface-name
                 mac_age,
                 vpp_cmd_queue_len=None):
        """Connect to VPP and start the forwarder's background workers.

        :param physnets: mapping of physnet name -> VPP interface name
        :param mac_age: MAC aging time applied to bridge domains we create
        :param vpp_cmd_queue_len: optional VPP API queue depth, passed
            straight through to VPPInterface

        Spawns greenthreads for vhost-user connection detection, tap
        bridging, device monitoring and vhost socket-file monitoring.
        """
        self.vpp = vpp.VPPInterface(LOG, vpp_cmd_queue_len)
        self.net_driver = network_interface.NetworkDriverManager(self)
        self.physnets = physnets
        self.mac_age = mac_age
        # a Mapping of security groups to VPP ACLs
        self.secgroups = {}  # secgroup_uuid: VppAcl(ingress_idx, egress_idx)
        # Security group UUID to the set of associated port UUIDs
        self.remote_group_ports = defaultdict(set)
        # Port UUID to its set of IP addresses
        self.port_ips = defaultdict(set)
        # Remote-group UUID to the set to security-groups that uses it
        self.remote_group_secgroups = defaultdict(set)
        # ACLs we ought to delete
        self.deferred_delete_secgroups = set()
        # Enable the GPE forwarder programming, if required
        self.gpe: Optional[gpe.GPEForwarder]
        if TYPE_GPE in cfg.CONF.ml2.type_drivers:
            self.gpe = gpe.GPEForwarder(self)
        else:
            self.gpe = None
        self.interfaces = {}  # uuid: if idx
        self.router_interfaces = {}  # router_port_uuid: {}
        self.router_external_interfaces = {}  # router external interfaces
        self.floating_ips = {}  # floating_ip_uuid: {}
        if cfg.CONF.ml2_vpp.enable_l3_ha:
            # Router BVI (loopback) interface states for L3-HA
            self.router_interface_states = {}  # {idx: state} 1 = UP, 0 = DOWN
            # VPP Router state variable is updated by the RouterWatcher
            # The default router state is the BACKUP.
            # If this node should be the master it will be told soon enough,
            # and this will prevent us from having two masters on any restart.
            self.router_state = False  # True = Master; False = Backup
        # mac_ip acls do not support atomic replacement.
        # Here we create a mapping of sw_if_index to VPP ACL indices
        # so we can easily lookup the ACLs associated with the interface idx
        # sw_if_index: {"l34": [l34_acl_indxs], "l23": l23_acl_index }
        self.port_vpp_acls: Dict[vpp.if_idx_t, dict] = defaultdict(dict)
        # key: OpenStack port UUID; present when vhost-user is
        # connected and removed when we delete things. May accumulate
        # any other VPP interfaces too, but that's harmless.
        self.port_connected: Set[str] = set()
        self.vhost_ready_callback = None
        eventlet.spawn_n(self.vhost_notify_thread)
        # Thread to drain the queues for binding tap interfaces into Linux
        # bridges
        eventlet.spawn_n(self.tap_notify_thread)
        # External devices detected by the device monitor
        self.external_devices = eventlet.queue.Queue()
        # Device monitor to ensure the tap interfaces are plugged into the
        # right Linux bridge
        self.async_devmon = device_monitor_async.AsyncDeviceMonitor()
        self.async_devmon.on_add(self._consider_external_device)
        # The worker will be in endless loop, so don't care the return value
        self.async_devmon.start()
        # Start Vhostsocket filemonitor to bind sockets as soon as they appear.
        self.filemonitor = file_monitor.FileMonitor(
            watch_pattern=n_const.UUID_PATTERN,
            watch_dir=cfg.CONF.ml2_vpp.vhost_user_dir)
        # Register to handle ON_CREATE event.
        self.filemonitor.register_on_add_cb(
            self.ensure_interface_for_vhost_socket_binding)
        # Register to handle ON_DELETE event.
        # We are expecting the port unbinding call flow to clean up vhost
        # sockets, hence ignoring delete events on vhost file handle.
        self.filemonitor.register_on_del_cb(lambda *args: None)
        # Finally start the file monitor.
        eventlet.spawn_n(self.filemonitor.run)
########################################
# Port resyncing on restart
    def fix_physnets(self, physnets):
        """Fix or remove networks where uplinks have changed in config

        - fixes uplink interfaces from VPP where they've changed in
          config or where the config didn't fully get pushed
          to VPPFowarder
        - deletes interfaces and networks from VPP where the
          the physical network is no longer configured
        - evicts ports from bridges with no network

        :param physnets: configured physnet name -> VPP interface name
            mapping (same shape as self.physnets)
        """
        # One uplink per network
        uplink_ports_found = []
        # One physnet can serve multiple uplinks
        physnet_ports_found = {}
        for f in self.vpp.get_interfaces():
            # Find uplink ports on OpenStack networks
            uplink_data = decode_uplink_tag(f['tag'])
            if uplink_data is not None:
                uplink_physnet, net_type, seg_id = uplink_data
                # Record the uplink and its parent ("super") interface;
                # when there is no parent the uplink is its own parent.
                uplink_ports_found.append([
                    uplink_physnet, net_type, seg_id,
                    f['sw_if_idx'],
                    f['sw_if_idx'] if f['sup_sw_if_idx'] is None
                    else f['sup_sw_if_idx']])
            # Find physical network ports
            physnet_name = decode_physnet_if_tag(f['tag'])
            if physnet_name is not None:
                physnet_ports_found[physnet_name] = f['sw_if_idx']
        # Find physnets we intend according to the config
        configured_physnet_interfaces = {}
        for name, if_name in physnets.items():
            # Can be 'None', that's fine as it won't match anything later
            configured_physnet_interfaces[name] = \
                self.vpp.get_ifidx_by_name(if_name)
        LOG.debug('Configured physnets %s',
                  ', '.join(sorted(configured_physnet_interfaces.keys())))
        for uplink_physnet, net_type, seg_id, sw_if_idx, sup_sw_if_idx \
                in uplink_ports_found:
            # Delete networks with a physnet whose config changed
            if (uplink_physnet not in configured_physnet_interfaces
                    or (sup_sw_if_idx !=
                        configured_physnet_interfaces[uplink_physnet])):
                LOG.warning('Deleting outdated network in VPP: net type '
                            '%(type)s physnet %(physnet)s seg id %(seg)s, '
                            'physnet if %(physif)d uplink %(uplinkif)d',
                            {'type': net_type,
                             'physnet': uplink_physnet,
                             'seg': str(seg_id),
                             'physif': sup_sw_if_idx,
                             'uplinkif': sw_if_idx})
                if uplink_physnet not in configured_physnet_interfaces:
                    LOG.warning('This physnet is no longer in the config')
                else:
                    LOG.warning(
                        'This physnet now uses interface '
                        '%(idx)d (%(physnet_name)s)',
                        {'idx': configured_physnet_interfaces[uplink_physnet],
                         'physnet_name': physnets[uplink_physnet]})
                # This will remove ports from bridges, which means
                # that they may be rebound back into networks later
                # or may be deleted if no longer used.
                self.delete_network_bridge_on_host(net_type,
                                                   sw_if_idx,
                                                   sw_if_idx)
        for name, if_idx in physnet_ports_found.items():
            if configured_physnet_interfaces.get(name, None) != if_idx:
                # This configuration has changed.
                # Untag the original physnet interface, which is no
                # longer used as a physnet
                LOG.warning('Removing old physnet from VPP: '
                            'physnet %(physnet_name)s interface %(idx)s',
                            {'physnet_name': name,
                             'idx': str(if_idx)})
                # In case there was a flat network, make sure the flat
                # network bridge no longer exists
                self.delete_network_bridge_on_host('flat', if_idx, if_idx)
                self.vpp.set_interface_tag(if_idx, None)
        # The remaining networks (with uplinks and bridge domains) are
        # functional, and idempotent binding will do nothing to
        # interfaces in the right bridges. It will fix those in the
        # wrong bridges.
        # Dead bridges have been deleted and binding
        # will find a new home for the interfaces that still exist.
def find_bound_ports(self):
"""Assuming no local data, find bound ports in VPP
This analyses the tags to identify ports in VPP that
have been bound by this process before it restarted.
"""
bound_ports = set()
for f in self.vpp.get_interfaces():
# Find downlink ports
port_id = decode_port_tag(f['tag'])
if port_id is not None:
bound_ports.add(port_id)
return bound_ports
########################################
    def vhost_notify_thread(self):
        """Find vhostuser connections with an attached VM

        The moment of VM attachment is useful, as it's one of the
        preconditions for notifying Nova a socket is ready.  Watching
        the vhostuser data inside VPP has a performance impact on
        forwarding, so instead we watch the kernel's idea of which
        vhostuser connections are properly opened.

        Having two open sockets is 99% ready - technically, the interface
        is ready when VPP has mapped its memory, but these two events are
        nearly contemporaenous, so this is good enough.

        Endless worker loop: polls /proc/net/unix once a second.
        """
        dirname = cfg.CONF.ml2_vpp.vhost_user_dir
        # We need dirname to have precisely one trailing slash.
        dirname = dirname.rstrip('/') + '/'
        while True:
            opens: Dict[str, int] = defaultdict(int)
            with open('/proc/net/unix') as content:
                # Track unix sockets in vhost directory that are opened more
                # than once
                for f in content:
                    # Problems with fnames with spaces in, though
                    _, fname = f.rsplit(' ', 1)
                    if fname.startswith(dirname):
                        # Count opens of this socket path (key is the
                        # bare filename, i.e. the port UUID)
                        fname = fname[len(dirname):].rstrip("\n")
                        opens[fname] = opens[fname] + 1
            # Report on any sockets that are open exactly twice (VPP + KVM)
            # (note list clone so that we can delete entries)
            for f in list(opens.keys()):
                if opens[f] != 2:
                    del opens[f]
            open_names: Set[str] = set(opens.keys())
            open_notifications: Set[str] = open_names - self.port_connected
            # .. we don't have to notify the port drops, that's fine
            # Update this *before* making callbacks so that this register is up
            # to date
            self.port_connected = open_names
            if self.vhost_ready_callback:
                for uuid in open_notifications:
                    self.vhost_ready_callback(uuid)
            eventlet.sleep(1)
def vhostuser_linked_up(self, uuid):
return uuid in self.port_connected
def vhostuser_unlink(self, uuid):
self.port_connected.discard(uuid)
########################################
def ifup(self, ifidx):
"""Proxy for VPP's ifup."""
self.vpp.ifup(ifidx)
########################################
def get_if_for_physnet(self, physnet):
"""Find (and mark used) the interface for a physnet"""
ifname = self.physnets.get(physnet, None)
if ifname is None:
LOG.error('Physnet %s requested but not in config',
physnet)
return None, None
ifidx = self.vpp.get_ifidx_by_name(ifname)
if ifidx is None:
LOG.error('Physnet %s interface %s does not '
'exist in VPP', physnet, ifname)
return None, None
self.vpp.set_interface_tag(ifidx, physnet_if_tag(physnet))
return ifname, ifidx
    def delete_network_bridge_on_host(
            self, net_type: str,
            bridge_domain_id: vpp.br_idx_t,
            uplink_if_idx: vpp.if_idx_t) -> None:
        """Delete a bridge corresponding to a network from VPP

        Usable on restart - uses nothing but the data in VPP.

        :param net_type: network type (e.g. 'flat', 'vlan')
        :param bridge_domain_id: VPP bridge domain to remove
        :param uplink_if_idx: the network's uplink interface; never
            brought administratively down (it may be a shared physical
            device), but for 'vlan' networks its sub-interface is deleted
        """
        if bridge_domain_id in self.vpp.get_bridge_domains():
            # If there are ports still in this network, disable them
            # They may be deleted later (if at startup) or they may
            # be rebound to another bridge domain
            if_idxes = self.vpp.get_ifaces_in_bridge_domain(bridge_domain_id)
            # When this bridge domain is for an OpenStack flat network, the
            # uplink interface may be a physical interface, i.e. not VLAN-based
            # sub-interfaces. In this case, we will not bring down the uplink
            # interface, and always leave it UP.
            if_idxes_without_uplink = \
                [i for i in if_idxes if i != uplink_if_idx]
            # At startup, this is downing the interfaces in a bridge that
            # is no longer required. However, in free running, this
            # should never find interfaces at all - they should all have
            # been unbound before the deletion. (If it does find them,
            # the removal of interfaces is probably the best thing we can
            # do, but they may not stay down if it races with the binding
            # code.)
            self.vpp.ifdown(*if_idxes_without_uplink)
            self.vpp.delete_from_bridge(*if_idxes)
            self.vpp.delete_bridge_domain(bridge_domain_id)
        # The physnet is gone so no point in keeping the vlan sub-interface
        # TODO(onong): VxLAN
        if net_type == 'vlan':
            if uplink_if_idx is not None:
                self.vpp.delete_vlan_subif(uplink_if_idx)
########################################
# stolen from LB driver
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
    def ensure_kernel_bridge(self, bridge_name):
        """Create a bridge unless it already exists.

        :param bridge_name: Linux bridge name
        :return: a BridgeDevice wrapping the (now existing, UP) bridge
        """
        # _bridge_exists_and_ensure_up instead of device_exists is used here
        # because there are cases where the bridge exists but it's not UP,
        # for example:
        # 1) A greenthread was executing this function and had not yet executed
        #    "ip link set bridge_name up" before eventlet switched to this
        #    thread running the same function
        # 2) The Nova VIF driver was running concurrently and had just created
        #    the bridge, but had not yet put it UP
        if not self._bridge_exists_and_ensure_up(bridge_name):
            bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
            bridge_device.setfd(0)
            bridge_device.disable_stp()
            bridge_device.disable_ipv6()
            bridge_device.link.set_up()
        else:
            bridge_device = bridge_lib.BridgeDevice(bridge_name)
        return bridge_device
# TODO(ijw): should be checking this all succeeded
# end theft
########################################
def _consider_external_device(self, dev_name):
"""See if we need to take action when a net device is created
This function will be called as a callback when a new interface is
created in Linux kernel. We will filter for tap interfaces created by
OpenStack, and those will be added to the bridges that we create on the
Neutron side of things.
"""
match = re.search(r'tap[0-9a-f]{8}-[0-9a-f]{2}', dev_name)
if not match:
return
# TODO(ijw) will act upon other mechanism drivers' taps
# Add the detected external device to be handled by the port-watcher
self.external_devices.put(dev_name)
def tap_notify_thread(self):
"""Ensure detected external tap devices are added to the bridge.
All detected external devices are queued in the external_devices
data set. So handle it in this method to ensure that these are added
to the bridge.
"""
while True:
try:
dev_name = self.external_devices.get()
port_id = dev_name[3:]
bridge_name = "br-%s" % port_id
self.ensure_tap_in_bridge(dev_name, bridge_name)
except Exception:
LOG.exception("Error while binding tap interface %s", dev_name)
    def ensure_tap_in_bridge(self, tap_name, bridge_name):
        """Add a TAP device to a Linux kernel bridge

        Defend against this having been done already (common on restart)
        and this missing a requirement (common when plugging external
        tap interfaces).

        :param tap_name: kernel device to enslave
        :param bridge_name: bridge to add it to (may not exist yet)
        """
        bridge = bridge_lib.BridgeDevice(bridge_name)
        bridge.set_log_fail_as_error(False)
        # Only act when both ends exist and the device is not already a
        # member; all of this can race with other agents and with Nova.
        if bridge.exists() and ip_lib.device_exists(tap_name) \
                and not bridge.owns_interface(tap_name):
            try:
                bridge.addif(tap_name)
            except Exception as ex:
                # External TAP interfaces created by DHCP or L3 agent will be
                # added to corresponding Linux Bridge by vpp-agent to talk to
                # VPP. During a regular port binding process, there are two
                # code paths calling this function for adding the interface to
                # the Linux Bridge, which may potentially cause a race
                # condition and a non-harmful traceback in the log. Also, it
                # is quite possible that a bridge may have been deleted by the
                # normal port unbinding process before this code tries to add
                # the tap interafce.
                # The fix will eliminate the non-harmful traceback in the log.
                # NOTE(review): the two patterns below match the known
                # benign "brctl" error texts; anything else is logged.
                match1 = re.search(r"Stderr\: device (vpp|tap)[0-9a-f]{8}-"
                                   "[0-9a-f]{2} is already a member of a "
                                   "bridge; can't enslave it to bridge br-"
                                   r'[0-9a-f]{8}-[0-9a-f]{2}\.', str(ex))
                match2 = re.search(r"Stderr: Error: argument \"br-"
                                   "[0-9a-f]{8}-[0-9a-f]{2}\" is wrong: "
                                   "Device does not exist", str(ex))
                if not match1 and not match2:
                    LOG.exception("Can't add interface %s to bridge %s: %s" %
                                  (tap_name, bridge_name, str(ex)))
def _ensure_kernelside_tap(self, bridge_name, tap_name, int_tap_name):
# This is the kernel-side config (and we should not assume
# that, just because the interface exists in VPP, it has
# been done previously - the crash could occur in the
# middle of the process)
# Running it twice is harmless. Never running it is
# problematic.
# TODO(ijw): someone somewhere ought to be sorting
# the MTUs out
self.ensure_kernel_bridge(bridge_name)
# This is the device that we just created with VPP
self.ensure_tap_in_bridge(int_tap_name, bridge_name)
# This is the external TAP device that will be
# created by Nova or an agent, say the DHCP agent,
# later in time.
self.ensure_tap_in_bridge(tap_name, bridge_name)
# This is called by the (eventlet) inotify functions and the (eventlet)
# etcd functionality, and thus needs an eventlet-based lock. We've found
# oslo_concurrency thinks that, because threading is unpatched, a threading
# lock is required, but this ends badly.
    @eventlet_lock('ensure-interface-lock')
    def ensure_interface_on_host(self, if_type, uuid, mac=None):
        """Create or update vpp interface on host based on if_type.

        Depending on the if_type (maketap, plugtap or vhostuser) call vpp papi
        to do vpp side of the plumbing. This will change depending on the
        if_type. The interfaces are tagged saved in the internal dict for easy
        retrieval.

        The call is idempotent if the uuid and its associated
        interface is already present.

        :param if_type: binding type, 'tap' or 'vhostuser'
        :param uuid: Neutron port UUID
        :param mac: port MAC (may be None, e.g. for pre-created
            vhost-user sockets)
        :return: dict indexed on uuid
        :raises UnsupportedInterfaceException: for unrecognised if_type
        """
        if uuid in self.interfaces:
            # It's definitely there, we made it ourselves
            pass
        else:
            # TODO(ijw): it may exist, but we may need to create it
            # - and what exists may be wrong so we may have to
            # recreate it
            # TODO(ijw): idempotency
            LOG.debug('Creating port %s as type %s with mac %s',
                      uuid, if_type, mac)
            # Deal with the naming conventions of interfaces
            # TODO(ijw): naming not obviously consistent with
            # Neutron's naming
            tap_name = get_tap_name(uuid)
            # TODO(ijw) structured type
            props: dict
            if if_type == 'tap':
                bridge_name = get_bridge_name(uuid)
                int_tap_name = get_vpptap_name(uuid)
                props = {'bridge_name': bridge_name,
                         'ext_tap_name': tap_name,
                         'int_tap_name': int_tap_name}
            elif if_type == 'vhostuser':
                path = get_vhostuser_name(uuid)
                props = {'path': path}
            else:
                raise UnsupportedInterfaceException()
            tag = port_tag(uuid)
            props['bind_type'] = if_type
            # NB(onong): In case the if_type is vhostuser then this is the
            # neutron port's/VM's mac and it has implications for gpe networks
            # so please be mindful before altering this
            props['mac'] = mac
            # The tag is how a restarted agent re-finds its interfaces
            iface_idx = self.vpp.get_ifidx_by_tag(tag)
            if iface_idx is not None:
                # The agent has at some point reset, but before the reset
                # this interface was at least created. A previous sweep
                # will have ensured it's the right sort of interface.
                LOG.debug('port %s recovering existing port in VPP',
                          uuid)
            else:
                # Make an interface, and tag it for refinding.
                LOG.debug('binding port %s as type %s' %
                          (uuid, if_type))
                if if_type == 'tap':
                    iface_idx = self.vpp.create_tap(int_tap_name, mac=None,
                                                    tag=tag)
                elif if_type == 'vhostuser':
                    iface_idx = self.vpp.create_vhostuser(path, tag)
            if if_type == 'tap':
                # Plugtap interfaces belong in a kernel bridge, and we need
                # to monitor for the other side attaching.
                self._ensure_kernelside_tap(bridge_name,
                                            tap_name,
                                            int_tap_name)
            props['iface_idx'] = iface_idx
            self.interfaces[uuid] = props
        return self.interfaces[uuid]
def ensure_interface_for_vhost_socket_binding(self, name):
"""Ensure vpp interface for imminent vhost socket binding.
Somebody has dropped a file in the vhost_socket_directory which matched
our watch pattern (Neutron port uuid). We are expecting an imminent
vhost socket binding (from presumably Nova), so lets get ahead of the
curve and create a vhost socket for it.
Inteface name is the vhost socket file name and since we don't know
the mac, let vhost interface create function make one.
"""
LOG.debug("Calling VPP interface creation on vhost socket with props "
"vif_type: %s , uuid: %s ", 'vhostuser', name)
self.ensure_interface_on_host('vhostuser', uuid=name, mac=None)
def ensure_interface_in_vpp_bridge(self, net_br_idx, iface_idx):
"""Idempotently ensure that a bridge contains an interface
The interface must exist, but we ensure the bridge exists and
that the interface is in it
"""
self.ensure_bridge_domain_in_vpp(net_br_idx)
# Adding an interface to a bridge does nothing if it's
# already in there, and moves it if it's in another
self.vpp.add_to_bridge(net_br_idx, iface_idx)
def ensure_bridge_domain_in_vpp(self, bridge_idx):
if bridge_idx not in self.vpp.get_bridge_domains():
LOG.debug('Creating vpp bridge domain %s', bridge_idx)
self.vpp.create_bridge_domain(bridge_idx, self.mac_age)
    def bind_interface_on_host(self, if_type, uuid, mac, physnet,
                               net_type, seg_id):
        """Configure the interface in VPP per the binding request.

        Because we may be restarting the agent on a VPP that is already
        running, do this defensively: interfaces that we do not know
        about may have had some of their binding done. Acting in this
        way, we can be sure that the interface is now correctly bound
        regardless of what may have transpired previously.

        This may be called at any time because of a request from
        the mechanism driver, or it may be called during resync
        when state already exists in VPP but in either case we fix
        what we find and draw out from that a picture of the current
        state, including whether (in the case of vhostuser interfaces)
        the far end of the socket has attached to VPP.

        :param if_type: 'tap' or 'vhostuser'
        :param uuid: Neutron port UUID
        :param mac: port MAC address
        :param physnet: physical network name
        :param net_type: network type (flat/vlan/GPE/...)
        :param seg_id: segmentation id
        :return: interface property dict (with 'net_data' attached), or
            None when the uplink could not be configured
        """
        # In order, we create the network bridge, the interface for
        # the far end, and we add it to the bridge. Any of these
        # may have been done before; the functions we call correct
        # any previous state they find.
        net_data = self.net_driver.ensure_network(physnet, net_type, seg_id)
        if net_data is None:
            LOG.error('port bind is not possible as physnet '
                      'could not be configured')
            # Returning None allows us to deal with the uplink
            # side of a failed binding in the caller.
            # For resyncs, the port exists but it's not in a bridge domain
            # and is down, which is the best we can offer.
            return None
        if net_type == TYPE_GPE and self.gpe is None:
            LOG.error('port bind - GPE is not enabled')
            return None
        net_br_idx = net_data['bridge_domain_id']
        props = self.ensure_interface_on_host(if_type, uuid, mac)
        iface_idx = props['iface_idx']
        self.ensure_interface_in_vpp_bridge(net_br_idx, iface_idx)
        # Ensure local mac to VNI mapping for GPE
        if net_type == TYPE_GPE and self.gpe is not None:
            LOG.debug('Adding local GPE mapping for seg_id:%s and mac:%s',
                      seg_id, mac)
            self.gpe.add_local_gpe_mapping(seg_id, mac)
        props['net_data'] = net_data
        LOG.debug('Bound vpp interface with sw_idx:%s on '
                  'bridge domain:%s',
                  iface_idx, net_br_idx)
        return props
    def unbind_interface_on_host(self, uuid):
        """Detach an interface, clean up structures

        This removes and destroys the interface and the network
        if it is no longer used.

        This is *not* used in rebinding, as this requires the data
        we stored about an interface when it was bound.

        :param uuid: Neutron port UUID; unknown UUIDs are ignored with
            a debug message.
        """
        if uuid not in self.interfaces:
            LOG.debug('unknown port %s unbinding request - ignored',
                      uuid)
        else:
            props = self.interfaces[uuid]
            net = props.get('net_data')
            self.clean_interface_from_vpp(uuid, props)
            # Delete the port ip address from remote_group_id list
            self.port_ips.pop(uuid, None)
            if net is not None:
                # Check if this is the last interface on host, safe if this
                # interface is incompletely bound
                for interface in self.interfaces.values():
                    if net == interface.get('net_data'):
                        # safe if the other interface is not bound
                        break
                else:
                    # for/else: this branch runs only if the loop finished
                    # without 'break', i.e. no remaining interface uses net.
                    # Network is not used on this host, delete it
                    self.net_driver.delete_network(net['physnet'],
                                                   net['network_type'],
                                                   net['segmentation_id'])
def bind_subport_on_host(self, parent_port, subport_data):
    """Bind the subport of a bound parent vhostuser port.

    Arguments:
    parent_port - UUID of the already-bound parent port (must exist in
        self.interfaces; callers ensure the parent binding first)
    subport_data - dict carrying 'port_id', 'segmentation_id',
        'physnet', 'uplink_seg_type' and 'uplink_seg_id'

    Returns the new self.interfaces entry for the subport, or None if
    the uplink network could not be configured.
    """
    # We ensure parent port binding before calling this method.
    subport_uuid = subport_data['port_id']
    subport_seg_id = subport_data['segmentation_id']
    # parent vhostuser intf
    parent_props = self.interfaces[parent_port]
    parent_if_idx = parent_props['iface_idx']
    # Ensure that the uplink and the BD's are setup
    physnet = subport_data['physnet']
    uplink_seg_type = subport_data['uplink_seg_type']
    uplink_seg_id = subport_data['uplink_seg_id']
    LOG.debug('trunk: ensuring subport network on host '
              'physnet %s, uplink_seg_type %s, uplink_seg_id %s',
              physnet, uplink_seg_type, uplink_seg_id)
    # Ensure an uplink for the subport
    # Use the uplink physnet, uplink_seg_id & seg_type
    net_data = self.net_driver.ensure_network(physnet,
                                              uplink_seg_type,
                                              uplink_seg_id)
    if net_data is None:
        LOG.error('trunk sub-port binding is not possible as the '
                  'physnet could not be configured for subport')
        return None
    # fetch if the subport interface already in vpp
    subport_tag = port_tag(subport_uuid)
    subport_if_idx = self.vpp.get_ifidx_by_tag(subport_tag)
    net_br_idx = net_data['bridge_domain_id']
    if subport_if_idx is not None:
        # It's already there and we created it
        LOG.debug('Recovering existing trunk subport %s in VPP',
                  subport_uuid)
        # Ensure that the recovered subport is in vpp bridge
        self.ensure_interface_in_vpp_bridge(net_br_idx, subport_if_idx)
    else:
        # create subport vhostuser intf and ensure it's in vpp bridge
        LOG.debug('trunk: ensuring subport interface on host '
                  'parent_if_idx %s, seg_id %s', parent_if_idx,
                  subport_seg_id)
        # The subport is a VLAN subinterface of the parent, tagged with
        # the subport's own segmentation id.
        subport_if_idx = self.vpp.create_vlan_subif(parent_if_idx,
                                                    subport_seg_id)
        self.ensure_interface_in_vpp_bridge(net_br_idx, subport_if_idx)
        # set subport tag, so we can find it during resyncs
        self.vpp.set_interface_tag(subport_if_idx, subport_tag)
    LOG.debug("Bound subport in vpp with sw_idx: %s on BD: %s ",
              subport_if_idx, net_br_idx)
    # Add subport props to interfaces along with parent port uuid
    self.interfaces[subport_uuid] = {'iface_idx': subport_if_idx,
                                     'net_data': net_data,
                                     'mac': parent_props['mac'],
                                     'bind_type': 'vhostuser',
                                     'path': parent_props['path'],
                                     'parent_uuid': parent_port
                                     }
    # Track subport membership on the parent so the parent is not torn
    # down while subports remain (see clean_interface_from_vpp).
    if 'trunk' not in parent_props:
        LOG.debug('Setting trunk attr value in parent port props for '
                  'subport %s', subport_uuid)
        parent_props['trunk'] = set([subport_uuid])
    else:
        LOG.debug('Adding subport to trunk parent props for subport %s ',
                  subport_uuid)
        parent_props['trunk'].add(subport_uuid)
    return self.interfaces[subport_uuid]
def unbind_subport_on_host(self, subport):
    """Unbind a trunk subport's vhostuser interface in VPP.

    Arguments:
    subport - UUID of the subport; unknown UUIDs are logged and ignored.

    The subport is removed like any other interface and then dropped
    from its parent port's trunk membership set.
    """
    if subport not in self.interfaces:
        LOG.debug('unknown subport %s unbinding request - ignored',
                  subport)
        return
    LOG.debug("Unbinding subport %s on host", subport)
    parent_port = self.interfaces[subport]['parent_uuid']
    LOG.debug("Parent port id of subport %s is %s",
              subport, parent_port)
    self.unbind_interface_on_host(subport)
    # Forget the subport on the parent so the parent can eventually be
    # unbound once its trunk set is empty.
    self.interfaces[parent_port]['trunk'].remove(subport)
def clean_interface_from_vpp(self, uuid, props):
    """Remove a bound interface's VPP state and forget it locally.

    Arguments:
    uuid - port UUID
    props - the self.interfaces entry describing the binding

    Removes the local GPE mapping (for GPE networks), any ACLs, and
    the interface itself - a vhostuser interface / VLAN subinterface
    for 'vhostuser' bindings, or the tap plus its Linux bridge
    plumbing for 'tap' bindings - then pops the port from
    self.interfaces. A trunk parent that still has subports is left
    completely untouched until they are unbound.
    """
    # Don't unbind a trunk port with subports
    if 'trunk' in props and len(props['trunk']) > 0:
        LOG.debug('Waiting for subports %s to be unbound before '
                  'unbinding trunk port %s', props, uuid)
        return
    iface_idx = props['iface_idx']
    LOG.debug('unbinding port %s, recorded as type %s',
              uuid, props['bind_type'])
    # We no longer need this interface. Specifically if it's
    # a vhostuser interface it's annoying to have it around
    # because the VM's memory (hugepages) will not be
    # released. So, here, we destroy it.
    # GPE code in VPP does not clean up its data structures
    # properly if the port
    # is deleted from the bridge without first removing the
    # local GPE eid mapping. So remove local mapping,
    # if we are bound using GPE
    if props['net_data']['network_type'] == TYPE_GPE \
            and self.gpe is not None:
        mac = props['mac']
        seg_id = props['net_data']['segmentation_id']
        self.gpe.delete_local_gpe_mapping(seg_id, mac)
    if props['bind_type'] == 'vhostuser':
        # Delete port from vpp_acl map if present
        if iface_idx in self.port_vpp_acls:
            self.remove_acls_on_port(iface_idx)
            self.remove_mac_ip_acl_on_port(iface_idx)
            del self.port_vpp_acls[iface_idx]
        # remove port from bridge (sets to l3 mode) prior to deletion
        self.vpp.delete_from_bridge(iface_idx)
        # If it is a subport of a trunk port then delete the corresponding
        # vlan sub-interface. Otherwise it is a parent port or a normal
        # vhostuser port and we delete the vhostuser interface itself.
        if 'parent_uuid' not in props:
            self.vpp.delete_vhostuser(iface_idx)
        else:
            self.vpp.delete_vlan_subif(iface_idx)
        # This interface is no longer connected if it's deleted
        # RACE, as we may call unbind BEFORE the vhost user
        # interface is notified as connected to qemu
        self.vhostuser_unlink(uuid)
    elif props['bind_type'] == 'tap':
        # remove port from bridge (sets to l3 mode) prior to deletion
        self.vpp.delete_from_bridge(iface_idx)
        self.vpp.delete_tap(iface_idx)
        bridge_name = get_bridge_name(uuid)

        class FailableBridgeDevice(bridge_lib.BridgeDevice):
            # For us, we expect failing commands and want them ignored.
            def _ip_link(self, cmd):
                cmd = ['ip', 'link'] + cmd
                ip_wrapper = ip_lib.IPWrapper(self.namespace)
                return ip_wrapper.netns.execute(
                    cmd,
                    check_exit_code=False,
                    log_fail_as_error=False,
                    run_as_root=True
                )
        bridge = FailableBridgeDevice(bridge_name)
        if bridge.exists():
            # These may fail, don't care much
            if bridge.owns_interface(props['int_tap_name']):
                bridge.delif(props['int_tap_name'])
            if bridge.owns_interface(props['ext_tap_name']):
                bridge.delif(props['ext_tap_name'])
            bridge.link.set_down()
            bridge.delbr()
    else:
        LOG.error('Unknown port type %s during unbind',
                  props['bind_type'])
    self.interfaces.pop(uuid)
# TODO(ijw) this *really* needs typing with the return value structure.
def _to_acl_rule(self, r, d):
    """Convert a SecurityGroupRule to VPP ACL rule.

    Arguments:
    r - SecurityGroupRule NamedTuple Object
    SecurityGroupRule = namedtuple(
        'SecurityGroupRule',
        ['is_ipv6',
         'remote_ip_addr',
         'ip_prefix_len',
         'protocol',
         'port_min',
         'port_max'])
    TODO(ijw): is_ipv6 appears to be the v6-ness of remote_ip_addr
    d - Direction: 0 ==> ingress, 1 ==> egress
    Default == 2
    Return: VPP-formatted ACL Rule (a dict with is_permit, proto,
    src/dst prefixes and port/ICMP range fields)
    """
    acl_rule = {}
    # a - Permit-Action: 1 == permit, 2 == reflexive;
    if not reflexive_acls:
        a = 1
    # Enable reflexive ACLs for all TCP/UDP and IP traffic
    elif r.protocol in [6, 17, 0]:
        a = 2
    else:
        a = 1  # Disable reflexive for other traffic such as ICMP etc.
    acl_rule['is_permit'] = a
    acl_rule['proto'] = r.protocol
    # for ingress: secgroup remote_ip == Source IP
    # for egress: secgroup remote_ip == Destination IP
    # Port ranges are always destination port ranges for TCP/UDP
    # Set source port range to permit all ranges from 0 to 65535
    if d == 0:
        # OpenStack may provide an interface with subnet (i.e. an
        # address *on* a network and not an address *of* the
        # network). VPP requires the network address.
        acl_rule['src_prefix'] = \
            ip_interface((r.remote_ip_addr, r.ip_prefix_len,)).network
        acl_rule['dst_prefix'] = \
            ip_network("::/0" if r.is_ipv6 else "0.0.0.0/0")
    else:
        acl_rule['src_prefix'] = \
            ip_network("::/0" if r.is_ipv6 else "0.0.0.0/0")
        # OpenStack may provide an interface with subnet (i.e. an
        # address *on* a network and not an address *of* the
        # network). VPP requires the network address.
        acl_rule['dst_prefix'] = \
            ip_interface((r.remote_ip_addr, r.ip_prefix_len,)).network
    # Handle ICMP/ICMPv6: the "port" fields carry ICMP type (src
    # position) and code (dst position) instead of L4 ports.
    if r.protocol in [1, 58]:
        if r.port_min == -1:  # All ICMP Types and Codes [0-255]
            acl_rule['srcport_or_icmptype_first'] = 0
            acl_rule['srcport_or_icmptype_last'] = 255
            acl_rule['dstport_or_icmpcode_first'] = 0
            acl_rule['dstport_or_icmpcode_last'] = 255
        elif r.port_max == -1:  # All ICMP codes for an ICMP Type
            acl_rule['srcport_or_icmptype_first'] = r.port_min
            acl_rule['srcport_or_icmptype_last'] = r.port_min
            acl_rule['dstport_or_icmpcode_first'] = 0
            acl_rule['dstport_or_icmpcode_last'] = 255
        else:  # port_min == ICMP Type and port_max == ICMP Code
            acl_rule['srcport_or_icmptype_first'] = r.port_min
            acl_rule['srcport_or_icmptype_last'] = r.port_min
            acl_rule['dstport_or_icmpcode_first'] = r.port_max
            acl_rule['dstport_or_icmpcode_last'] = r.port_max
    # Handle TCP/UDP protocols
    elif r.protocol in [6, 17]:
        # None port bounds mean "unspecified": open the full range.
        acl_rule['dstport_or_icmpcode_first'] = \
            default_if_none(r.port_min, 0)
        acl_rule['dstport_or_icmpcode_last'] = \
            default_if_none(r.port_max, 65535)
        # Allow all ranges for source ports
        acl_rule['srcport_or_icmptype_first'] = 0
        acl_rule['srcport_or_icmptype_last'] = 65535
    # Handle all protocols - All IPv4 and IPv6 TCP/UDP traffic
    elif r.protocol == 0:
        acl_rule['dstport_or_icmpcode_first'] = 0
        acl_rule['dstport_or_icmpcode_last'] = 65535
        acl_rule['srcport_or_icmptype_first'] = 0
        acl_rule['srcport_or_icmptype_last'] = 65535
    # NOTE(review): other protocol numbers fall through with no
    # port-range fields set - presumably VPP ignores them for those
    # protocols; confirm.
    return acl_rule
# Reverse rules are only added if reflexive_acls is set to False
def _reverse_rule(self, r):
"""Compose and return a reverse rule for r if reflexive_acls is False
Arguments:
r - rule dictionary returned by the _to_acl_rule(r) method above
swap src and dst IP and port ranges to match return traffic for r
"""
acl_rule = {}
# 1 == Permit rule and 0 == deny rule
acl_rule['is_permit'] = r['is_permit']
acl_rule['proto'] = r['proto']
# All TCP/UDP IPv4 and IPv6 traffic
if r['proto'] in [6, 17, 0]:
acl_rule['src_prefix'] = r['dst_prefix']
acl_rule['dst_prefix'] = r['src_prefix']
# Swap port range values
acl_rule['srcport_or_icmptype_first'] = \
r['dstport_or_icmpcode_first']
acl_rule['srcport_or_icmptype_last'] = \
r['dstport_or_icmpcode_last']
acl_rule['dstport_or_icmpcode_first'] = \
r['srcport_or_icmptype_first']
acl_rule['dstport_or_icmpcode_last'] = \
r['srcport_or_icmptype_last']
return acl_rule
def acl_add_replace_on_host(self, secgroup):
    """Adds/Replaces the secgroup ACL within VPP

    Arguments:
    secgroup - SecurityGroup NamedTuple object
    namedtuple('SecurityGroup', ['id', 'ingress_rules', 'egress_rules'])

    Converts the group's rules to VPP form (adding reverse rules for
    TCP/UDP/all-IP traffic when reflexive ACLs are disabled), pushes
    them to VPP tagged with the group's id, and records the resulting
    ACL indexes in self.secgroups.
    """
    # Default action == ADD if the acl indexes are set to ~0
    # VPP ACL indexes correspond to ingress and egress security
    # group rules
    in_acl_idx, out_acl_idx = \
        self.secgroups.get(secgroup.id,
                           VppAcl(0xffffffff, 0xffffffff))
    in_acl_rules, out_acl_rules = (
        [self._to_acl_rule(r, 0) for r in secgroup.ingress_rules],
        [self._to_acl_rule(r, 1) for r in secgroup.egress_rules])
    # If not reflexive_acls create return rules for ingress and egress
    # IPv4/IPv6 tcp/udp traffic
    # Exclude ICMP
    if not reflexive_acls:
        in_acl_return_rules, out_acl_return_rules = (
            [self._reverse_rule(r) for r in in_acl_rules
             if r['proto'] in [6, 17, 0]],
            [self._reverse_rule(r) for r in out_acl_rules
             if r['proto'] in [6, 17, 0]]
        )
        # Return traffic for egress rules arrives on ingress and vice
        # versa, hence the cross-append.
        in_acl_rules = in_acl_rules + out_acl_return_rules
        out_acl_rules = out_acl_rules + in_acl_return_rules
    in_acl_idx = self.vpp.acl_add_replace(acl_index=in_acl_idx,
                                          tag=secgroup_tag(secgroup.id,
                                                           VPP_TO_VM),
                                          rules=in_acl_rules)
    out_acl_idx = self.vpp.acl_add_replace(acl_index=out_acl_idx,
                                           tag=secgroup_tag(secgroup.id,
                                                            VM_TO_VPP),
                                           rules=out_acl_rules)
    self.secgroups[secgroup.id] = VppAcl(in_acl_idx, out_acl_idx)
    # If this is on the pending delete list it shouldn't be now
    self.deferred_delete_secgroups.discard(secgroup.id)
def acl_delete_on_host(self, secgroup):
    """Deletes the ingress and egress VPP ACLs on host for secgroup

    This may delete up front or it may defer (and delete when it's
    next called, which is adequately fast) if there's a port using
    the ACL.

    Arguments:
    secgroup - OpenStack security group ID

    NOTE: the parameter name is reused as the loop variable below, so
    after the loop 'secgroup' refers to the last deferred id visited.
    """
    # Attempt both the current ACL and any more ACLs that have been
    # previously deferred:
    self.deferred_delete_secgroups.add(secgroup)
    remaining_secgroups = set()
    for secgroup in self.deferred_delete_secgroups:
        try:
            secgroup_acls = self.secgroups[secgroup]
        except KeyError:
            LOG.error("secgroup_watcher: received request to delete "
                      "an unknown security group %s", secgroup)
            # This security group doesn't exist, don't add to the
            # deferred list
            continue
        try:
            # Scan every VPP interface for a reference to either of
            # this group's ACL indexes.
            used = False
            for iface in self.vpp.get_interfaces():
                in_acls, out_acls = self.vpp.get_interface_acls(
                    iface['sw_if_idx'])
                for acl_idx in secgroup_acls:
                    if acl_idx in in_acls or acl_idx in out_acls:
                        used = True
                        break
            if used:
                LOG.debug('deferring delete of acls for secgroup %s'
                          ' as a port is using them', secgroup)
                remaining_secgroups.add(secgroup)
            else:
                for acl_idx in secgroup_acls:
                    self.vpp.acl_delete(acl_index=acl_idx)
                del self.secgroups[secgroup]
                # Discard the security group from the remote group dict
                for remote_group in self.remote_group_secgroups:
                    self.remote_group_secgroups[
                        remote_group].discard(secgroup)
        except Exception as e:
            LOG.exception("Exception while deleting ACL %s", e)
            # We could defer this again but it's probably better
            # we move on. Orphaned ACLs are not the end of the world.
            remaining_secgroups.add(secgroup)
    self.deferred_delete_secgroups = remaining_secgroups
def populate_secgroup_acl_mappings(self):
    """From vpp acl dump, populate the secgroups to VppACL mapping.

    Get a dump of existing vpp ACLs that are tagged, by tag
    Decode tag info
    populate secgroups data structure relating UUID of secgroup to ACL
    self.secgroups = {secgroup_id : VppAcl(in_idx, out_idx)}

    Returns the (dict-view of) recovered security group ids.
    """
    LOG.debug("Populating secgroup to VPP ACL map..")
    # Clear existing secgroups to ACL map for sanity
    self.secgroups = {}
    # Example of the acl_map data
    # acl_map: {'net-vpp.secgroup:<uuid>.from-vpp' : acl_idx
    #           'net-vpp.secgroup:<uuid>.to-vpp' : acl_idx,
    #           'net-vpp.common_spoof.from-vpp': acl_idx }
    acl_map = self.get_secgroup_acl_map()
    for item, acl_idx in acl_map.items():
        # Tags can be one of ours, or one something else set
        # decode_* functions attempt to match the tags to one of our
        # formats, and returns None if that's not a format it matches.
        secgroup_id, direction = decode_secgroup_tag(item)
        if secgroup_id is None:
            # Check if this is one of our common spoof ACL tag
            # If so, get the tag direction and set the secgroup_id to
            # COMMON_SPOOF_TAG so the correct spoof ACL can be read
            direction = decode_common_spoof_tag(item)
            if direction is not None:
                # But it is a valid spoof tag
                secgroup_id = COMMON_SPOOF_TAG
                ingress = direction == VPP_TO_VM
        else:  # one of our valid secgroup ACL tag
            ingress = direction == VPP_TO_VM
        if secgroup_id is None:
            # This is neither a security group or a spoof
            # - so it's not installed by the mechdriver at all
            continue
        # Each group's two ACLs arrive as separate tags; start from
        # the existing (possibly half-filled) entry and fill one side.
        vpp_acl = self.secgroups.get(secgroup_id,
                                     VppAcl(0xffffffff, 0xffffffff))
        # secgroup_id will be missing first pass, and should be
        # completed on the second round through.
        if ingress:
            self.secgroups[secgroup_id] = vpp_acl._replace(
                in_idx=acl_idx)
        else:
            self.secgroups[secgroup_id] = vpp_acl._replace(
                out_idx=acl_idx)
    if self.secgroups == {}:
        LOG.debug("We recovered an empty secgroups "
                  "to acl mapping. Possible reason: vpp "
                  "may have been restarted on host.")
    # py3 note: in py3 keys() does not return a list but the following
    # seems to work fine. Enclose in list() is problems arise.
    return self.secgroups.keys()
def get_secgroup_acl_map(self):
    """Read VPP ACL tag data, construct and return an acl_map based on tag

    acl_map: {secgroup_tag : acl_idx}

    Only tags that decode as one of our secgroup or common-spoof tags
    are included; any other (externally created) ACLs are skipped.
    Raises whatever the VPP dump raises, after logging it.
    """
    acl_map = {}
    try:
        for acl_index, tag in self.vpp.get_acl_tags():
            # TODO(ijw): identify that this is one of our tags
            # NOTE: 'id' shadows the builtin of the same name here.
            id, direction = decode_secgroup_tag(tag)
            if id is not None:
                acl_map[tag] = acl_index
            else:
                direction = decode_common_spoof_tag(tag)
                if direction is not None:
                    acl_map[tag] = acl_index
            # Not all ACLs have tags, but ACLs we own will
            # have them and they will be decodeable. Ignore
            # any externally created ACLs, they're not our problem.
    except Exception:
        LOG.exception("Exception getting acl_map from vpp acl tags")
        raise
    return acl_map
def maybe_set_acls_on_port(self, secgroup_ids, sw_if_index):
    """Apply the security groups' ACLs to a VPP port, if all are ready.

    Arguments:
    secgroup_ids - OpenStack security group IDs bound to the port
    sw_if_index - VPP software interface index to program

    Each group is looked up in self.secgroups. If any group is missing
    or still carries an unset (0xffffffff) ACL index - typical while
    the secgroup watcher thread is repopulating the map, e.g. after an
    agent restart - nothing is programmed and False is returned so the
    caller can retry later. Otherwise the collected ACLs are pushed to
    the port and True is returned.
    """
    collected = []
    for sg_id in secgroup_ids:
        acl = self.secgroups.get(sg_id)
        usable = (acl is not None and
                  acl.in_idx != 0xFFFFFFFF and
                  acl.out_idx != 0xFFFFFFFF)
        if not usable:
            LOG.debug("Still waiting for a valid vpp acl "
                      "corresponding to secgroup %s" % sg_id)
            return False
        collected.append(acl)
    self._set_acls_on_vpp_port(collected, sw_if_index)
    return True
def _set_acls_on_vpp_port(self, vpp_acls, sw_if_index):
    """Build a vector of VPP ACLs and set it on the port

    Arguments -
    vpp_acls - a list of VppAcl(in_idx, out_idx) namedtuples to be set
    on the interface. An empty list '[]' deletes all user
    defined acls from the interface and retains only the spoof
    ACL
    sw_if_index - VPP software interface index to program
    """
    # Initialize lists with anti-spoofing vpp acl indices
    spoof_acl = self.spoof_filter_on_host()
    # input acl on vpp filters egress traffic from vm and viceversa
    input_acls = [spoof_acl.out_idx]
    output_acls = [spoof_acl.in_idx]
    if vpp_acls:
        for acl in vpp_acls:
            input_acls.append(acl.out_idx)  # in on vpp == out on vm
            output_acls.append(acl.in_idx)  # out on vpp == in on vm
    # Build the vpp ACL vector
    acls = input_acls + output_acls
    # (najoy) At this point we just keep a mapping of acl vectors
    # associated with a port and do not check for any repeat application.
    self.vpp.set_acl_list_on_interface(sw_if_index,
                                       input_acls, output_acls)
    # NOTE(review): bare indexing here suggests port_vpp_acls is a
    # defaultdict(dict) - confirm where it is initialised.
    self.port_vpp_acls[sw_if_index]['l34'] = acls
def set_mac_ip_acl_on_vpp_port(self, mac_ips, sw_if_index):
    """Set the mac-filter on VPP port

    Arguments:
    mac_ips - A list of tuples of (mac_address, ip_address)
    sw_if_index - Software index ID of the VPP port

    Uses make-before-break: the new MACIP ACL is created and attached
    before any previous one is deleted, so the port is never left
    unfiltered during the swap.
    """
    def _pack_mac(mac_address):
        """Pack a colon-separated mac_address into binary."""
        return binascii.unhexlify(mac_address.replace(':', ''))
    # Full mask: the source MAC must match exactly.
    src_mac_mask = _pack_mac('FF:FF:FF:FF:FF:FF')
    mac_ip_rules = []
    for mac, ip in mac_ips:  # ip can be an address (or) a network/prefix
        # TODO(ijw): is it ever an interface rather than a network address?
        # This is the struct the VPP API accepts: note the packed address
        mac_ip_rules.append(
            {'is_permit': 1,
             'src_mac': _pack_mac(mac),
             'src_mac_mask': src_mac_mask,
             'src_prefix': ip_network(ip)})
    # get the current mac_ip_acl on the port if_any
    port_mac_ip_acl = None
    try:
        port_mac_ip_acl = self.port_vpp_acls[sw_if_index]['l23']
    except KeyError:
        pass  # There may not be an ACL on the interface
    acl_index = self.vpp.macip_acl_add(rules=mac_ip_rules,
                                       count=len(mac_ip_rules))
    self.vpp.set_macip_acl_on_interface(sw_if_index=sw_if_index,
                                        acl_index=acl_index,
                                        )
    if port_mac_ip_acl:  # Delete the previous macip ACL from VPP
        self.vpp.delete_macip_acl(acl_index=port_mac_ip_acl)
    self.port_vpp_acls[sw_if_index]['l23'] = acl_index
def remove_acls_on_port(self, sw_if_index):
    """Removes all security group ACLs on the vpp port

    Arguments:-
    sw_if_index - Software index of the port on which ACLs are to be
    removed
    """
    # We should know about the existing ACLS on port by looking up
    # port_vpp_acls. If there is a KeyError, we do not know about any
    # ACLs on that port. So ignore
    # NOTE(review): the VPP call runs before the local-map deletion,
    # so it executes even when no 'l34' record exists for this port;
    # confirm delete_acl_list_on_interface is safe in that case.
    try:
        self.vpp.delete_acl_list_on_interface(sw_if_index)
        del self.port_vpp_acls[sw_if_index]['l34']
    except KeyError:
        LOG.debug("No Layer3 ACLs are set on interface %s.. nothing "
                  "to delete", sw_if_index)
def remove_mac_ip_acl_on_port(self, sw_if_index):
    """Removes all MAC/IP ACLs on the vpp port

    These ACLs correspond to anti-spoof and allowed-address-pair.

    Arguments:-
    sw_if_index - Software index of the port on which ACLs are to be
    removed
    """
    # The lookup comes first, so a port we know nothing about takes
    # the KeyError path without touching VPP.
    try:
        l2_acl_index = self.port_vpp_acls[sw_if_index]['l23']
        self.vpp.delete_macip_acl_on_interface(sw_if_index, l2_acl_index)
        self.vpp.delete_macip_acl(l2_acl_index)
        del self.port_vpp_acls[sw_if_index]['l23']
    except KeyError:
        LOG.debug("No mac_ip ACLs are set on interface %s.. nothing "
                  "to delete", sw_if_index)
def spoof_filter_on_host(self):
    """Adds a spoof filter ACL on host if not already present.

    A spoof filter is identified by a common spoof tag mark.
    If not present create the filter on VPP, If it is present, replace
    it for good measure to ensure that the correct anti-spoof rules
    are always applied.

    Return: VppAcl(in_idx, out_idx)
    """
    # Check if we have an existing spoof filter deployed on vpp
    spoof_acl = self.secgroups.get(COMMON_SPOOF_TAG)
    # Get the current anti-spoof filter rules. If a spoof filter is
    # present replace rules for good measure, else create a new
    # spoof filter
    spoof_filter_rules = self.get_spoof_filter_rules()
    # ~0 indexes tell acl_add_replace to create rather than replace.
    if spoof_acl:
        in_acl_idx, out_acl_idx = spoof_acl.in_idx, spoof_acl.out_idx
    else:
        in_acl_idx = out_acl_idx = 0xffffffff
    in_acl_idx = self.vpp.acl_add_replace(
        acl_index=in_acl_idx,
        tag=common_spoof_tag(VPP_TO_VM),
        rules=spoof_filter_rules['ingress'])
    out_acl_idx = self.vpp.acl_add_replace(
        acl_index=out_acl_idx,
        tag=common_spoof_tag(VM_TO_VPP),
        rules=spoof_filter_rules['egress'])
    # Add the new spoof ACL to secgroups mapping if it is valid
    # (a replace of an existing spoof ACL leaves the mapping as-is).
    if (in_acl_idx != 0xFFFFFFFF
            and out_acl_idx != 0xFFFFFFFF and not spoof_acl):
        spoof_acl = VppAcl(in_acl_idx, out_acl_idx)
        self.secgroups[COMMON_SPOOF_TAG] = spoof_acl
    return spoof_acl
def _pack_address(self, ip_addr):
"""Pack an IPv4 or IPv6 (ip_addr or ip_network) into binary.
If the argument is an ip_address, it is packed and if the argument is
an ip_network only the network portion of it is packed
Arguments:-
ip_addr: an IPv4 or IPv6 address without a prefix_length e.g. 1.1.1.1
(or)
an IPv4 or IPv6 network with prefix_length e.g. 1.1.1.0/24
"""
# Works for both addresses and the net address of masked networks
return ip_network(ip_addr).network_address.packed
def _get_snat_indexes(self, floatingip_dict):
    """Return the internal and external interface indices for SNAT.

    Ensure the internal n/w, external n/w and their corresponding
    BVI loopback interfaces are present, before returning their
    index values.

    Arguments:
    floatingip_dict - dict carrying internal_/external_ physnet,
        net_type and segmentation_id keys

    Returns (internal_bvi_idx, external_bvi_idx), or (None, None) if
    either network could not be ensured.
    """
    # Get internal network details.
    internal_network_data = self.net_driver.ensure_network(
        floatingip_dict['internal_physnet'],
        floatingip_dict['internal_net_type'],
        floatingip_dict['internal_segmentation_id'])
    # Get the external network details
    external_network_data = self.net_driver.ensure_network(
        floatingip_dict['external_physnet'],
        floatingip_dict['external_net_type'],
        floatingip_dict['external_segmentation_id'])
    if internal_network_data and external_network_data:
        int_br_idx = internal_network_data['bridge_domain_id']
        ext_br_idx = external_network_data['bridge_domain_id']
        # Return the internal and external BVI loopback intf indxs.
        return (self.ensure_bridge_bvi(int_br_idx),
                self.ensure_bridge_bvi(ext_br_idx))
    else:
        LOG.error('Failed to ensure network on host while setting SNAT')
        return None, None
def _delete_external_subinterface(self, floatingip_dict):
    """Tear down the external network if it is no longer in use.

    Arguments:
    floatingip_dict - dict carrying 'external_physnet',
        'external_net_type' and 'external_segmentation_id'

    The external network is deleted only when it still exists and its
    uplink interface carries no remaining IP addresses.
    """
    physnet = floatingip_dict['external_physnet']
    net_type = floatingip_dict['external_net_type']
    seg_id = floatingip_dict['external_segmentation_id']
    net_data = self.net_driver.get_network(physnet, net_type, seg_id)
    if not net_data:
        return
    # Any address left on the uplink means the network is still used.
    uplink_addrs = self.vpp.get_interface_ip_addresses(
        net_data['if_uplink_idx'])
    if not uplink_addrs:
        self.net_driver.delete_network(physnet, net_type, seg_id)
def _ensure_external_vlan_subif(self, if_name, if_idx, seg_id):
    """Ensure a VLAN subinterface of the uplink exists in VPP.

    Arguments:
    if_name - name of the parent interface (used for the VPP lookup)
    if_idx - software index of the parent interface
    seg_id - VLAN tag for the subinterface

    Returns the subinterface's software index. A newly created
    subinterface is brought up; a pre-existing one is returned as-is.
    """
    sub_if = self.vpp.get_vlan_subif(if_name, seg_id)
    if not sub_if:
        # Create a VLAN subif
        sub_if = self.vpp.create_vlan_subif(if_idx, seg_id)
        self.vpp.ifup(sub_if)
    return sub_if
def ensure_bridge_bvi(self,
                      bridge_idx: vpp.br_idx_t,
                      mac_address: vpp.mac_str_t = None) -> vpp.if_idx_t:
    """Return the bridge's BVI loopback index, creating it if absent.

    Arguments:
    bridge_idx - bridge domain to give a BVI
    mac_address - optional MAC for a newly created loopback

    A loopback that already serves as the bridge's BVI is returned
    unchanged; otherwise one is created and attached as the BVI.
    """
    existing = self.vpp.get_bridge_bvi(bridge_idx)
    if existing:
        return existing
    loopback = self.vpp.create_loopback(mac_address)
    self.vpp.set_loopback_bridge_bvi(loopback, bridge_idx)
    return loopback
def ensure_router_interface_on_host(self, port_id, router_data):
    """Ensure a router interface on the local host.

    Creates a loopback interface and sets the bridge's BVI to the
    loopback interface to act as an L3 gateway for the network.
    For external networks, the BVI functions as an SNAT external
    interface. For updating an interface, the service plugin removes
    the old interface and then adds the new router interface. If an
    external gateway exists, ensures a local route in VPP.

    When Layer3 HA is enabled, the router interfaces are only enabled on
    the active VPP router. The standby router keeps the interface in
    an admin down state.

    Arguments:
    port_id - UUID of the router port
    router_data - dict describing the interface; the presence of
        'external_gateway_info' selects the external-gateway path.

    Returns the BVI loopback interface index.
    """
    # The interface could be either an external_gw or an internal router
    # interface on a subnet
    # Enable SNAT by default unless it is set to False
    enable_snat = True
    # Multiple routers on a shared external subnet is supported
    # by adding local routes in VPP.
    is_local = 0  # True for local-only VPP routes.
    # Create an external interfce if the external_gateway_info key is
    # present, else create an internal interface
    if router_data.get('external_gateway_info', False):
        seg_id = router_data['external_segmentation_id']
        net_type = router_data['external_net_type']
        physnet = router_data['external_physnet']
        vrf = vpp.DEFAULT_VRF
        is_inside = False
        enable_snat = router_data['external_gateway_info']['enable_snat']
        external_gateway_ip = router_data['external_gateway_ip']
        # To support multiple IP addresses on a router port, add
        # the router to each of the subnets.
        gateway_ip = router_data['gateways'][0][0]
        prefixlen = router_data['gateways'][0][1]
        is_ipv6 = bool(router_data['gateways'][0][2])
    else:
        seg_id = router_data['segmentation_id']
        net_type = router_data['net_type']
        physnet = router_data['physnet']
        vrf = router_data['vrf_id']
        is_inside = True
        external_gateway_ip = None
        gateway_ip = router_data['gateway_ip']
        prefixlen = router_data['prefixlen']
        is_ipv6 = bool(router_data['is_ipv6'])
    # Ensure the network exists on host and get the network data
    net_data = self.net_driver.ensure_network(physnet, net_type, seg_id)
    # Get the bridge domain id and ensure a BVI interface for it
    bridge_idx = net_data['bridge_domain_id']
    # Ensure a BVI (i.e. A loopback) for the bridge domain
    loopback_idx = self.vpp.get_bridge_bvi(bridge_idx)
    # Create a loopback BVI interface
    loopback_mac = vpp.mac_str_t(router_data['loopback_mac'])
    if loopback_idx is None:
        # Create the loopback interface, but don't bring it UP yet
        loopback_idx = self.ensure_bridge_bvi(bridge_idx, loopback_mac)
    # Set the VRF for tenant BVI interfaces, if not already set
    if vrf and not self.vpp.get_interface_vrf(loopback_idx) == vrf:
        self.vpp.set_interface_vrf(loopback_idx, vrf, is_ipv6)
    # Make a best effort to set the MTU on the interface
    try:
        self.vpp.set_interface_mtu(loopback_idx, router_data['mtu'])
    except SystemExit:
        # Log error and continue, do not exit here
        LOG.error("Error setting MTU on router interface")
    ha_enabled = cfg.CONF.ml2_vpp.enable_l3_ha
    if ha_enabled:
        # Now bring up the loopback interface, if this router is the
        # ACTIVE router. Also populate the data structure
        # router_interface_states so the HA code can activate and
        # deactivate the interface
        if self.router_state:
            LOG.debug("Router HA state is ACTIVE")
            LOG.debug("Bringing UP the router intf idx: %s", loopback_idx)
            self.vpp.ifup(loopback_idx)
            self.router_interface_states[loopback_idx] = 1
        else:
            LOG.debug("Router HA state is BACKUP")
            LOG.debug("Bringing DOWN the router intf idx: %s",
                      loopback_idx)
            self.vpp.ifdown(loopback_idx)
            self.router_interface_states[loopback_idx] = 0
        LOG.debug("Current router interface states: %s",
                  self.router_interface_states)
    else:
        self.vpp.ifup(loopback_idx)
    # Set SNAT on the interface if SNAT is enabled
    # Get a list of all SNAT interfaces
    int_list = self.vpp.get_snat_interfaces()
    if loopback_idx not in int_list and enable_snat:
        self.vpp.set_snat_on_interface(loopback_idx, is_inside)
        # Set the SNAT 1:N overload on the external loopback interface
        if not is_inside:
            self.vpp.snat_overload_on_interface_address(loopback_idx)
    # Add GPE mappings for GPE type networks only on the master
    # node, if ha_enabled
    if net_type == TYPE_GPE and self.gpe is not None:
        if (ha_enabled and self.router_state) or not ha_enabled:
            self.gpe.add_local_gpe_mapping(seg_id, loopback_mac)
    # Set the gateway IP address on the BVI interface, if not already set
    addresses = self.vpp.get_interface_ip_addresses(loopback_idx)
    gw_ipif = ip_interface((gateway_ip, prefixlen,))
    # Is there another gateway ip_addr set on this external loopback?
    if not is_inside:
        # Any address other than the one we're thinking of?
        exists_gateway = any((addr for addr in addresses
                              if addr != gw_ipif))
        if exists_gateway:
            LOG.debug('A router gateway exists on the external network.'
                      'The current router gateway IP: %s will be added as '
                      'a local VPP route', str(gw_ipif))
    if gw_ipif not in addresses:
        # This address is not yet present?
        # Add a local VRF route if another external gateway exists
        # (exists_gateway is only evaluated when not is_inside, which
        # is also the only case where it was assigned above)
        if not is_inside and exists_gateway:
            is_local = True
            ip_prefix_length = 32 if gw_ipif.version == 4 else 128
            # Add a local IP route if it doesn't exist
            self.vpp.add_ip_route(vrf=vrf,
                                  ip_address=self._pack_address(
                                      gateway_ip),
                                  prefixlen=ip_prefix_length,
                                  next_hop_address=None,
                                  next_hop_sw_if_index=None,
                                  is_ipv6=is_ipv6,
                                  is_local=is_local)
        else:
            self.vpp.set_interface_ip(loopback_idx, gw_ipif)
    # Record everything the teardown/export paths will need later.
    router_dict = {
        'segmentation_id': seg_id,
        'physnet': physnet,
        'net_type': net_type,
        'bridge_domain_id': bridge_idx,
        'bvi_if_idx': loopback_idx,
        'gateway_ip': gateway_ip,
        'prefixlen': prefixlen,
        'is_ipv6': is_ipv6,
        'mac_address': loopback_mac,
        'is_inside': is_inside,
        'external_gateway_ip': external_gateway_ip,
        'vrf_id': vrf,
        'uplink_idx': net_data.get('if_uplink_idx'),
        'is_local': is_local
    }
    if is_inside:
        LOG.debug("Router: Created inside router port: %s",
                  router_dict)
        self.router_interfaces[port_id] = router_dict
        # Ensure that all gateway networks are exported into this
        # tenant VRF &
        # A default route exists in this VRF to the external gateway
        self.export_routes_from_tenant_vrfs(
            source_vrf=router_dict['vrf_id'])
    else:
        LOG.debug("Router: Created outside router port: %s",
                  router_dict)
        self.router_external_interfaces[port_id] = router_dict
        # TODO(onong):
        # The current VPP NAT implementation supports only one outside
        # FIB table and by default it uses table 0, ie, the default vrf.
        # So, this is a temporary workaround to tide over the limitation.
        if not is_local:
            self.default_route_in_default_vrf(router_dict)
        # Ensure that the gateway network is exported into all tenant
        # VRFs, with the correct default routes
        self.export_routes_from_tenant_vrfs(
            ext_gw_ip=router_dict['external_gateway_ip'])
    return loopback_idx
def become_master_router(self):
    """Promote this node to the active (master) router role.

    Every tracked router interface currently recorded as DOWN is
    brought up in VPP and its state flag flipped to 1.
    """
    LOG.debug("VPP becoming the master router..")
    LOG.debug("Current router interface states: %s",
              self.router_interface_states)
    for if_idx, is_up in self.router_interface_states.items():
        if is_up:
            continue
        LOG.debug("Bringing UP the router interface: %s", if_idx)
        # TODO(najoy): Bring up intf. only if not set to admin DOWN
        self.vpp.ifup(if_idx)
        self.router_interface_states[if_idx] = 1
    LOG.debug("New router interface states: %s",
              self.router_interface_states)
def become_backup_router(self):
    """Demote this node to the standby (backup) router role.

    Every tracked router interface currently recorded as UP is taken
    down in VPP and its state flag flipped to 0.
    """
    LOG.debug("VPP becoming the standby router..")
    LOG.debug("Current router interface states: %s",
              self.router_interface_states)
    for if_idx, is_up in self.router_interface_states.items():
        if not is_up:
            continue
        LOG.debug("Bringing DOWN the router interface: %s", if_idx)
        self.vpp.ifdown(if_idx)
        self.router_interface_states[if_idx] = 0
    LOG.debug("New router interface states: %s",
              self.router_interface_states)
def _get_ip_network(self, gateway_ip, prefixlen):
"""Returns the IP network for the gateway in CIDR form."""
return str(ip_interface(gateway_ip + "/" + str(prefixlen)).network)
def default_route_in_default_vrf(self, router_dict, is_add=True):
# ensure that default route in default VRF is present
if is_add:
self.vpp.add_ip_route(
vrf=router_dict['vrf_id'],
ip_address=self._pack_address('0.0.0.0'),
prefixlen=0,
next_hop_address=self._pack_address(
router_dict['external_gateway_ip']),
next_hop_sw_if_index=router_dict['bvi_if_idx'],
is_ipv6=router_dict['is_ipv6'])
else:
self.vpp.delete_ip_route(
vrf=router_dict['vrf_id'],
ip_address=self._pack_address('0.0.0.0'),
prefixlen=0,
next_hop_address=self._pack_address(
router_dict['external_gateway_ip']),
next_hop_sw_if_index=router_dict['bvi_if_idx'],
is_ipv6=router_dict['is_ipv6'])
    def export_routes_from_tenant_vrfs(self, source_vrf=0, is_add=True,
                                       ext_gw_ip=None):
        """Exports the external gateway into the tenant VRF.

        The gateway network has to be exported into the tenant VRF for
        it to communicate with the outside world. Also a default route
        has to be set to the external gateway IP address.

        If source_vrf (i.e tenant VRF) is provided,
        - Export the external gateway's IP from VRF=0 into this VRF.
        - Add a default route to the external_gateway in this VRF
        Else,
        - Export the external gateway into all tenant VRFs
        - Add a default route to the external_gateway in all tenant VRFs
        If the external gateway IP address is not provided:
        All external networks are exported into tenant VRFs

        :param source_vrf: tenant VRF id to restrict the export to;
            0 (the default, falsy) means every tenant VRF
        :param is_add: True to program routes, False to withdraw the
            corresponding routes previously programmed
        :param ext_gw_ip: if set, only external interfaces whose
            external gateway IP equals this value are processed
        """
        if source_vrf:
            LOG.debug("Router:Exporting external route into tenant VRF:%s",
                      source_vrf)
        else:
            LOG.debug("Router:Exporting external route into all tenant VRFs")
        # TODO(najoy): Check if the tenant ID matches for the gateway router
        # external interface and export only matching external routes.
        for ext_port in self.router_external_interfaces:
            gw_port = self.router_external_interfaces[ext_port]
            for int_port in self.router_interfaces.values():
                int_vrf = int_port['vrf_id']
                ext_vrf = gw_port['vrf_id']
                # If a source vrf is present only update if the VRF matches
                if source_vrf and int_vrf != source_vrf:
                    continue
                is_ipv6 = int_port['is_ipv6']
                default_gw_ip = "::" if is_ipv6 else '0.0.0.0'
                external_gateway_ip = gw_port['external_gateway_ip']
                if ext_gw_ip and external_gateway_ip != ext_gw_ip:
                    continue
                # Get the external and internal networks in the CIDR form
                ext_network = self._get_ip_network(
                    gw_port['gateway_ip'],
                    gw_port['prefixlen']
                )
                int_network = self._get_ip_network(
                    int_port['gateway_ip'],
                    int_port['prefixlen']
                )
                if is_add:
                    # Add the default route (0.0.0.0/0) to the
                    # external gateway IP addr, which is outside of VPP
                    # with the next hop sw_if_index set to the external
                    # loopback BVI address.
                    # Note: The external loopback sw_if_index and the
                    # next_hop_address is mandatory here to prevent a VPP
                    # crash - Similar to the CLI command
                    # ip route add table <int-vrf> 0.0.0.0/0 via <next-hop-ip>
                    # <next-hop-sw-indx>
                    #
                    # Note(onong): Do not set IPv6 default gateway to an IPv4
                    # external gateway
                    ext_ip = ip_address(external_gateway_ip)
                    if is_ipv6 and ext_ip.version != 6:
                        LOG.info('Not setting IPv6 default route via an IPv4'
                                 ' external gateway')
                    else:
                        self.vpp.add_ip_route(
                            vrf=int_vrf,
                            ip_address=self._pack_address(default_gw_ip),
                            prefixlen=0,
                            next_hop_address=self._pack_address(
                                external_gateway_ip),
                            next_hop_sw_if_index=gw_port['bvi_if_idx'],
                            is_ipv6=is_ipv6)
                    # Export the external gateway subnet into the tenant VRF
                    # to enable tenant traffic to flow out. Exporting is done
                    # by setting the next hop sw if index to the loopback's
                    # sw_index (i.e. BVI) on the external network
                    # CLI: ip route add table <int_vrf> <external-subnet>
                    # via <next-hop-sw-indx>
                    #
                    # Note(onong): Do not export an IPv4 external network
                    # into an IPv6 VRF.
                    ext_net = ip_network(ext_network)
                    if is_ipv6 and ext_net.version != 6:
                        LOG.info('Not exporting IPv4 external network into '
                                 'tenant\'s IPv6 VRF')
                    else:
                        self.vpp.add_ip_route(
                            vrf=int_vrf,
                            ip_address=self._pack_address(ext_network),
                            prefixlen=gw_port['prefixlen'],
                            next_hop_address=None,
                            next_hop_sw_if_index=gw_port['bvi_if_idx'],
                            is_ipv6=is_ipv6)
                    # Export the tenant network into external VRF so the
                    # gateway can route return traffic to the tenant VM from
                    # the Internet.
                    # CLI: ip route add table 0 <tenant-subnet> via
                    # <tenant-loopback-bvi>
                    #
                    # Note(onong): Do not export an IPv4 internal network
                    # into an IPv6 external VRF.
                    int_net = ip_network(int_network)
                    if is_ipv6 and int_net.version != 6:
                        LOG.info('Not exporting tenant\'s IPv4 internal '
                                 'network into IPv6 external VRF')
                    else:
                        self.vpp.add_ip_route(
                            vrf=ext_vrf,
                            ip_address=self._pack_address(int_network),
                            prefixlen=int_port['prefixlen'],
                            next_hop_address=None,
                            next_hop_sw_if_index=int_port['bvi_if_idx'],
                            is_ipv6=is_ipv6)
                else:
                    # Withdraw the default route in the tenant VRF
                    self.vpp.delete_ip_route(
                        vrf=int_vrf,
                        ip_address=self._pack_address(default_gw_ip),
                        prefixlen=0,
                        next_hop_address=self._pack_address(
                            external_gateway_ip),
                        next_hop_sw_if_index=gw_port['bvi_if_idx'],
                        is_ipv6=is_ipv6)
                    # Delete the exported route in tenant VRF
                    self.vpp.delete_ip_route(
                        vrf=int_vrf,
                        ip_address=self._pack_address(ext_network),
                        prefixlen=gw_port['prefixlen'],
                        next_hop_address=None,
                        next_hop_sw_if_index=gw_port['bvi_if_idx'],
                        is_ipv6=is_ipv6)
                    # Delete the exported route from the external VRF
                    self.vpp.delete_ip_route(
                        vrf=ext_vrf,
                        ip_address=self._pack_address(int_network),
                        prefixlen=int_port['prefixlen'],
                        next_hop_address=None,
                        next_hop_sw_if_index=int_port['bvi_if_idx'],
                        is_ipv6=is_ipv6)
def delete_router_interface_on_host(self, port_id):
"""Deletes a router interface from the host.
Disables SNAT, if it is set on the interface.
Deletes a loopback interface from the host, this removes the BVI
interface from the local bridge. Also, delete the default route and
SNAT address for the external interface.
"""
is_external = 0
if port_id in self.router_interfaces:
router = self.router_interfaces[port_id]
elif port_id in self.router_external_interfaces:
router = self.router_external_interfaces[port_id]
is_external = 1
ext_intf_ip = '{}/{}'.format(router['gateway_ip'],
router['prefixlen'])
# Get all local IP addresses in the external VRF belonging
# to the same external subnet as this router.
# Check if atleast one local_ip matches a neutron assigned
# external IP address of the router.
# If there's no match, there are no valid local IPs within VPP.
local_gw_ips = [r['gateway_ip'] for
r in self.router_external_interfaces.values()
if r['is_local']]
# While, in theory, there may be multiple IPs on an interface,
# in practice, we only program one (and program additional
# IPs via a local route).
# TODO(ijw): this is a somewhat unclean way of removing IP
# addresses attached to VPP interfaces that are in the
# subnet of ext_intf_ip, I think. Unclear if this is the
# right way to do that versus remembering the interface.
local_ip: Optional[str]
for ip in self.vpp.get_local_ip_address(ext_intf_ip,
router['is_ipv6'],
router['vrf_id']):
# Is the local_ip valid?
if ip in local_gw_ips:
LOG.debug('Found a router external local_ip in VPP: %s',
ip)
local_ip = ip
break
# For-else would mean no breaks i.e. no valid local_ips
else:
local_ip = None
else:
LOG.error("Router port:%s deletion error...port not found",
port_id)
return False
net_br_idx = router['bridge_domain_id']
bvi_if_idx = self.vpp.get_bridge_bvi(net_br_idx)
# If an external local route, we can safetly delete it from VPP
# Don't delete any SNAT
if is_external and router['is_local']:
LOG.debug("delete_router_intf: Removing the local route: %s/32",
router['gateway_ip'])
prefixlen = 128 if router['is_ipv6'] else 32
self.vpp.delete_ip_route(vrf=router['vrf_id'],
ip_address=self._pack_address(
router['gateway_ip']),
prefixlen=prefixlen,
next_hop_address=None,
next_hop_sw_if_index=None,
is_ipv6=router['is_ipv6'],
is_local=True)
# External router is a loopback BVI. If a local route exists,
# replace the BVI's IP address with its IP address.
# Don't delete the SNAT.
elif is_external and local_ip is not None:
LOG.debug('delete_router_intf: replacing router loopback BVI IP '
'address %s with the local ip address %s',
router['gateway_ip'], local_ip)
# Delete the IP address from the BVI.
if bvi_if_idx is not None:
self.vpp.del_interface_ip(
bvi_if_idx,
ip_interface((router['gateway_ip'],
router['prefixlen'],)))
# Delete the local route
prefixlen = 128 if router['is_ipv6'] else 32
self.vpp.delete_ip_route(vrf=router['vrf_id'],
ip_address=self._pack_address(local_ip),
prefixlen=prefixlen,
next_hop_address=None,
next_hop_sw_if_index=None,
is_ipv6=router['is_ipv6'],
is_local=True)
if bvi_if_idx is not None:
self.vpp.set_interface_ip(
bvi_if_idx,
ip_interface((local_ip, router['prefixlen'],)))
# Set the router external interface corresponding to the local
# route as non-local.
for router in self.router_external_interfaces.values():
if ip_address(router['gateway_ip']) == \
ip_address(local_ip):
router['is_local'] = 0
LOG.debug('Router external %s is no longer a local '
'route but now assigned to the BVI', router)
else:
# At this point, we can safetly remove both the SNAT and BVI
# loopback interfaces as no local routes exist.
snat_interfaces = self.vpp.get_snat_interfaces()
# Get SNAT out interfaces whose IP addrs are overloaded
snat_out_interfaces = self.vpp.get_outside_snat_interface_indices()
# delete SNAT if set on this interface
if router['bvi_if_idx'] in snat_interfaces:
LOG.debug('Router: Deleting SNAT on interface '
'index: %s', router['bvi_if_idx'])
self.vpp.set_snat_on_interface(router['bvi_if_idx'],
is_inside=router['is_inside'],
is_add=False)
# Delete the external 1:N SNAT and default routes in all VRFs
# for external router interface deletion
if not router['is_inside']:
LOG.debug('Router: Deleting external gateway port %s for '
'router: %s', port_id, router)
# Delete external snat addresses for the router
if router['bvi_if_idx'] in snat_out_interfaces:
LOG.debug('Router:Removing 1:N SNAT on external interface '
'index: %s', router['bvi_if_idx'])
self.vpp.snat_overload_on_interface_address(
router['bvi_if_idx'],
is_add=False)
# Delete all exported routes into tenant VRFs belonging to this
# external gateway
self.export_routes_from_tenant_vrfs(
ext_gw_ip=router['external_gateway_ip'], is_add=False)
# delete the default route in the default VRF
self.default_route_in_default_vrf(router, is_add=False)
else:
# Delete all exported routes from this VRF
self.export_routes_from_tenant_vrfs(source_vrf=router[
'vrf_id'], is_add=False)
# Delete the gateway IP address and the BVI interface if this is
# the last IP address assigned on the BVI
if bvi_if_idx is not None:
# Get all IP's assigned to the BVI interface
addresses = self.vpp.get_interface_ip_addresses(bvi_if_idx)
if len(addresses) > 1:
# Dont' delete the BVI, only remove one IP from it
self.vpp.del_interface_ip(
bvi_if_idx,
ip_interface((router['gateway_ip'],
router['prefixlen'],)))
else:
# Last subnet assigned, delete the interface
self.vpp.delete_loopback(bvi_if_idx)
if cfg.CONF.ml2_vpp.enable_l3_ha:
self.router_interface_states.pop(bvi_if_idx, None)
# Remove any local GPE mappings
if router['net_type'] == TYPE_GPE and self.gpe is not None:
LOG.debug('Removing local GPE mappings for router '
'interface: %s', port_id)
self.gpe.delete_local_gpe_mapping(router['segmentation_id'],
router['mac_address'])
if not is_external:
self.router_interfaces.pop(port_id)
else:
self.router_external_interfaces.pop(port_id)
def maybe_associate_floating_ips(self):
"""Associate any pending floating IP addresses.
We may receive a request to associate a floating
IP address, when the router BVI interfaces are not ready yet. So,
we queue such requests and do the association when the router
interfaces are ready.
"""
LOG.debug('Router: maybe associating floating IPs: %s',
self.floating_ips)
for floatingip in self.floating_ips:
if not self.floating_ips[floatingip]['state']:
fixedip_addr = self.floating_ips[
floatingip]['fixed_ip_address']
floatingip_addr = self.floating_ips[
floatingip]['floating_ip_address']
loopback_idx = self.floating_ips[floatingip]['loopback_idx']
external_idx = self.floating_ips[floatingip]['external_idx']
self._associate_floatingip(floatingip, fixedip_addr,
floatingip_addr, loopback_idx,
external_idx)
def _associate_floatingip(self, floatingip, fixedip_addr,
floatingip_addr, loopback_idx, external_idx):
"""Associate the floating ip address and update state."""
# It is possible that during a VPP+agent restart scenario, the tenant's
# VRF has not been set on the loopback
tenant_vrf = self.vpp.get_interface_vrf(loopback_idx)
if not tenant_vrf:
LOG.debug("Router: Tenant VRF:%s not been set yet", tenant_vrf)
return
else:
LOG.debug('Router: Tenant VRF:%s, floating IP:%s and bvi_idx:%s',
tenant_vrf, floatingip_addr, loopback_idx)
LOG.debug("Router: associating floatingip:%s with fixedip: %s "
"loopback_idx:%s, external_idx:%s", floatingip_addr,
fixedip_addr, loopback_idx, external_idx)
snat_interfaces = self.vpp.get_snat_interfaces()
if loopback_idx and loopback_idx not in snat_interfaces:
self.vpp.set_snat_on_interface(loopback_idx)
if external_idx and external_idx not in snat_interfaces:
self.vpp.set_snat_on_interface(external_idx, is_inside=False)
#
# For different tenants mapped to different VRFs, it is quite possible
# that the same fixed IP addr is mapped to different floating IP addrs,
# for example:
#
# (192.168.10.5, FIP1, VRF1)
# (192.168.10.5, FIP2, VRF2)
#
# So, we check for (localip, extip, tenenat_vrf) in VPP before creating
# the mapping.
(localip, extip) = (ip_address(fixedip_addr),
ip_address(floatingip_addr))
add_nat_mapping = True
for m in self.vpp.get_snat_static_mappings():
if (localip == m.local_ip_address and
extip == m.external_ip_address and tenant_vrf == m.vrf_id):
add_nat_mapping = False
if add_nat_mapping:
LOG.debug("Router: setting 1:1 SNAT %s:%s in tenant_vrf:%s",
fixedip_addr, floatingip_addr, tenant_vrf)
self.vpp.set_snat_static_mapping(localip, extip,
tenant_vrf)
# Clear any dynamic NAT sessions for the 1:1 NAT to take effect
self.vpp.clear_snat_sessions(localip)
self.floating_ips[floatingip]['tenant_vrf'] = tenant_vrf
self.floating_ips[floatingip]['state'] = True
LOG.debug('Router: Associated floating IP: %s',
self.floating_ips[floatingip])
else:
LOG.debug('Router: SNAT 1:1 mapping already exists for floating'
'IP: %s', self.floating_ips[floatingip])
def associate_floatingip(self, floatingip, floatingip_dict):
"""Add the VPP configuration to support One-to-One SNAT.
Arguments:-
floating_ip: The UUID of the floating ip address
floatingip_dict : The floating ip data
"""
LOG.debug("Router: Checking for existing association for"
" floating ip: %s", floatingip)
if floatingip in self.floating_ips:
self.disassociate_floatingip(floatingip)
else:
LOG.debug("Router: Found no existing association for floating ip:"
" %s", floatingip)
LOG.debug('Router: Associating floating ip address: %s: %s',
floatingip, floatingip_dict)
loopback_idx, external_idx = self._get_snat_indexes(floatingip_dict)
LOG.debug('Router: Retrieved floating ip intf indxs- int:%s, ext:%s',
loopback_idx, external_idx)
self.floating_ips[floatingip] = {
'fixed_ip_address': floatingip_dict['fixed_ip_address'],
'floating_ip_address': floatingip_dict['floating_ip_address'],
'loopback_idx': loopback_idx,
'external_idx': external_idx,
'state': False
}
tenant_vrf = self.vpp.get_interface_vrf(loopback_idx)
# Associate the floating IP iff the router has established a tenant
# VRF i.e. a vrf_id > 0
if tenant_vrf:
LOG.debug("Router: associate_floating_ip: tenant_vrf:%s BVI:%s",
tenant_vrf, loopback_idx)
self.floating_ips[floatingip]['tenant_vrf'] = tenant_vrf
self._associate_floatingip(floatingip,
floatingip_dict['fixed_ip_address'],
floatingip_dict['floating_ip_address'],
loopback_idx,
external_idx)
else:
self.floating_ips[floatingip]['tenant_vrf'] = 'undecided'
def disassociate_floatingip(self, floatingip):
"""Remove the VPP configuration used by One-to-One SNAT.
Arguments:-
floating_ip: The UUID of the floating ip address to be disassociated.
"""
LOG.debug('Router: Disassociating floating ip address:%s',
floatingip)
# Check if we know about this floating ip address
floatingip_dict = self.floating_ips.get(floatingip)
if floatingip_dict:
# Delete the SNAT internal and external IP address mapping.
LOG.debug('Router: deleting NAT mappings for floating ip: %s',
floatingip)
snat_local_ipaddresses = self.vpp.get_snat_local_ipaddresses()
if floatingip_dict['fixed_ip_address'] in snat_local_ipaddresses:
self.vpp.set_snat_static_mapping(
ip_address(floatingip_dict['fixed_ip_address']),
ip_address(floatingip_dict['floating_ip_address']),
floatingip_dict['tenant_vrf'],
is_add=False)
self.floating_ips.pop(floatingip)
else:
LOG.debug('router: floating ip address: %s not found to be '
'disassociated', floatingip)
    def get_spoof_filter_rules(self):
        """Build and return a list of anti-spoofing rules.

        Returns a dict with two keys named: ingress_rules and egress_rules
        ingress_rules = a list of ingress rules
        egress_rules = a list of egress rules

        NOTE: rule ORDER is significant - VPP matches fragments against
        the first rule for a given address/protocol (see the fragment
        discussion in the egress comments below), so do not reorder the
        lists.
        """
        def _compose_rule(is_permit,
                          src_prefix,
                          dst_prefix,
                          proto,
                          srcport_or_icmptype_first,
                          srcport_or_icmptype_last,
                          dstport_or_icmpcode_first,
                          dstport_or_icmpcode_last):
            """Build one ACL rule dict in the shape VPP expects.

            For ICMP/ICMPv6 rules the two "port" ranges carry the ICMP
            type and code ranges instead of L4 ports.
            """
            # Set is_permit = 2 if reflexive_acls and tcp/udp/ip traffic
            if is_permit == 1 and reflexive_acls and proto in [6, 17, 0]:
                is_permit = 2
            return {
                'is_permit': is_permit,
                'src_prefix': ip_network(src_prefix),
                'dst_prefix': ip_network(dst_prefix),
                'proto': proto,
                'srcport_or_icmptype_first': srcport_or_icmptype_first,
                'srcport_or_icmptype_last': srcport_or_icmptype_last,
                'dstport_or_icmpcode_first': dstport_or_icmpcode_first,
                'dstport_or_icmpcode_last': dstport_or_icmpcode_last
            }
        # Ingress filter rules to allow DHCP and ICMPv6 into VM
        # Allow incoming DHCP offer packets from dhcp servers
        # UDP src_port 67 (ipv4 dhcp server) and dst_port 68 (dhclient)
        # UDP src_port 547 (ipv6 dhserver) and dst_port 546 (ipv6 dclient)
        ingress_rules = [
            _compose_rule(1, '0.0.0.0/0', '0.0.0.0/0',
                          17, 67, 67, 68, 68),
            _compose_rule(1, '::/0', '::/0',
                          17, 547, 547, 546, 546),
            ]
        # Allow Icmpv6 Multicast listener Query, Report, Done (130,131,132)
        # neighbor soliciation (135) and neighbor advertisement (136) and
        # MLD2_REPORT (143) and ICMP_RA into the Instance
        ICMP_RA = n_const.ICMPV6_TYPE_RA
        for ICMP_TYPE in [130, 131, 132, 135, 136, 143, ICMP_RA]:
            ingress_rules.append(
                _compose_rule(1, '::/0', '::/0',
                              58, ICMP_TYPE, ICMP_TYPE, 0, 255)
                )
        # Egress spoof_filter rules from VM
        # Permit DHCP client packets (discovery + request)
        # UDP src_port 68 (ipv4 client) and dst_port 67 (ipv4 dhcp server)
        # UDP src_port 546 (ipv6 client) and dst_port 547 (ipv6 dhcp server)
        # Drop DHCP Offer packets originating from VM
        # src_port 67 and dst_port 68
        # src_port 547 and dst_port 546
        # Drop icmpv6 Router Advertisements from VMs.
        # Allow other outgoing icmpv6 packets
        # When packets are fragmented (as UCP(v6), ICMP(v6) and TCPv4 packets
        # all can be, VPP will match any fragment against the first rule
        # relating to that address and protocol. It ignores things like ports
        # and ICMP types because they aren't in the second and later fragments.
        #
        # If you want second and later fragments to get through, the first rule
        # that matches them *must* be a 'permit' rule.
        #
        # In our case it only happens for ICMPv6; we add a permit rule on an
        # invalid code to pre-empt the RA deny when matching fragments.
        # For TCPv4/v6, and ICMPv4, we don't have deny rules in spoof SG. so we
        # are good;
        # For UDPv4/v6, we do have a permit rule of DHCPv4/v6, so we are good;
        # For ICMPv6, we are adding a dummy permit rule to workaround this;
        egress_rules = [
            _compose_rule(1, '0.0.0.0/0', '0.0.0.0/0',
                          17, 68, 68, 67, 67),
            _compose_rule(1, '::/0', '::/0',
                          17, 546, 546, 547, 547),
            _compose_rule(0, '0.0.0.0/0', '0.0.0.0/0',
                          17, 67, 67, 68, 68),
            _compose_rule(0, '::/0', '::/0',
                          17, 547, 547, 546, 546),
            # Permits ICMPv6 fragments while not permitting (valid)
            # packets (type 0 is invalid)
            _compose_rule(1, '::/0', '::/0',
                          58, 0, 0, 0, 0),
            # ... because this rule would otherwise match fragments, being
            # the first rule, and would deny them
            _compose_rule(0, '::/0', '::/0',
                          58, ICMP_RA, ICMP_RA, 0, 255),
            _compose_rule(1, '::/0', '::/0',
                          58, 0, 255, 0, 255),
            # Permit TCP port 80 traffic to 169.254.169.254/32 for metadata
            _compose_rule(1, '0.0.0.0/0', '169.254.169.254/32',
                          6, 0, 65535, 80, 80),
            ]
        return {'ingress': ingress_rules,
                'egress': egress_rules}
# Shared constant imported from networking-vpp's constants module;
# see nvpp_const for its definition and use.
LEADIN = nvpp_const.LEADIN  # TODO(ijw): make configurable?
# TrunkWatcher thread's heartbeat interval
# NOTE(review): presumably in seconds - confirm against the watcher loop
# TODO(onong): make it configurable if need be
TRUNK_WATCHER_HEARTBEAT = 30
class EtcdListener(object):
    def __init__(self, host, client_factory, vppf, physnets):
        """Initialise the per-host etcd listener.

        :param host: this agent's host identifier
        :param client_factory: factory producing etcd client connections
        :param vppf: the VPP forwarder object that programs VPP itself
        :param physnets: physical network mapping handed to the
            forwarder during resync (see vpp_restart_prepare)
        """
        self.host = host
        self.client_factory = client_factory
        self.vppf = vppf
        self.physnets = physnets
        # Green-thread pool for the listener's worker threads
        self.pool = eventlet.GreenPool()
        # Whether ML2 security groups are enabled in configuration
        self.secgroup_enabled = cfg.CONF.SECURITYGROUP.enable_security_group
        # Add GPE key-watching, if required
        self.gpe_listener: Optional[gpe.GpeListener]
        if TYPE_GPE in cfg.CONF.ml2.type_drivers:
            self.gpe_listener = gpe.GpeListener(self)
        else:
            self.gpe_listener = None
        # These data structures are used as readiness indicators.
        # A port is only in here only if the attachment part of binding
        # has completed.
        # key: ifidx of port; value: (UUID, bound-callback, vpp-prop-dict)
        self.iface_state = {}
        # key: UUID of port; value: ifidx
        self.iface_state_ifidx = {}
        # Members of this are ports requiring security groups with unsatisfied
        # requirements.
        # key: ifidx; value: list of secgroup IDs, or None for
        # port-security-disabled ports
        self.iface_awaiting_secgroups = {}
        # Sub-ports of a trunk with pending port bindings.
        # trunk_port ID => List(sub_ports awaiting binding)
        # When the agent is restarted, it could receive an etcd watch event
        # to bind subports even before the parent port itself is bound. This
        # dict keeps tracks of such sub_ports. They will be reconsidered
        # for binding after the parent is bound.
        self.subports_awaiting_parents = {}
        # bound subports of parent ports
        # trunk_port ID => set(bound subports)
        self.bound_subports = defaultdict(set)
        # We also need to know if the vhostuser interface has seen a socket
        # connection: this tells us there's a state change, and there is
        # a state detection function on self.vppf.
        self.vppf.vhost_ready_callback = self._vhost_ready
def unbind(self, id):
if id not in self.iface_state_ifidx:
# Unbinding an unknown port
return
if self.iface_state_ifidx[id] in self.iface_state:
del self.iface_state[self.iface_state_ifidx[id]]
del self.iface_state_ifidx[id]
self.vppf.unbind_interface_on_host(id)
    def bind(self, bound_callback, id, binding_type, mac_address, physnet,
             network_type, segmentation_id, security_data):
        """Bind an interface as instructed by ML2 on this host.

        The interface as a network and binding type. Assuming the
        network as been dropped onto the physnet specified, bind
        that uplink to the interface in question by creating an
        interface of the appropriate form and propagating the network
        to it.

        This call also identifies if we should consider the interface
        fully up. This may happen now, or, asynchronously, later,
        depending on whether all the prerequisites are in place. That
        includes the behaviour of whatever's on the other end of the
        interface.

        :param bound_callback: invoked with (id, props) once the port is
            fully ready (see maybe_up)
        :param id: port UUID
        :param binding_type: binding style, e.g. 'vhostuser' or 'tap'
        :param mac_address: MAC address of the port
        :param physnet: physical network name (replaced with the GPE
            locator physnet for GPE networks)
        :param network_type: segment type, e.g. TYPE_GPE
        :param segmentation_id: segment ID of the network
        :param security_data: port security attributes (security groups,
            allowed address pairs, fixed IPs, ...)
        :returns: None when the binding failed; the port will then never
            be notified as ready
        """
        # args['binding_type'] in ('vhostuser', 'tap'):
        # For GPE, fetch remote mappings from etcd for any "new" network
        # segments we will be binding to so we are aware of all the remote
        # overlay (mac) to underlay (IP) values
        if network_type == TYPE_GPE and self.gpe_listener is not None:
            # For GPE, a physnet value is not messaged by ML2 as it
            # is not specified for creating a gpe tenant network. Hence for
            # these net types we replace the physnet with the value of
            # gpe_locators, which stand for the physnet name.
            physnet = self.gpe_listener.physnet()
            self.gpe_listener.ensure_gpe_remote_mappings(segmentation_id)
        props = self.vppf.bind_interface_on_host(binding_type,
                                                 id,
                                                 mac_address,
                                                 physnet,
                                                 network_type,
                                                 segmentation_id)
        if props is None:
            # Problems with the binding
            # We will never notify anyone this port is ready.
            return None
        # Store the binding information. We put this into
        # etcd when the interface comes up to show that things
        # are ready and expose it to curious operators, who may
        # be able to debug with it. This may not happen
        # immediately because the far end may not have connected.
        iface_idx = props['iface_idx']
        port_security_enabled = security_data.get('port_security_enabled',
                                                  True)
        if port_security_enabled:
            self.iface_awaiting_secgroups[iface_idx] = \
                security_data.get('security_groups', [])
        else:
            # 'None' is a special value indicating no port security
            self.iface_awaiting_secgroups[iface_idx] = None
        self.iface_state[iface_idx] = (id, bound_callback, props)
        self.iface_state_ifidx[id] = iface_idx
        # Apply the port-intrinsic (antispoof) rules now; secgroup ACLs
        # may have to wait until the ACLs exist on this host.
        self.apply_spoof_macip(iface_idx, security_data, props)
        self.maybe_apply_secgroups(iface_idx)
def vpp_restart_prepare(self):
"""On a restart, find bound ports and clean up unwanted config
Does the following:
- fixes uplinks
- identifies the ports we bound previously - they may need
removing or updating
Ports intended to be bound will have .bind() called later
in the resync, which will correcly populate VPPForwarder
structures and fix bindings whose type has changed; ports
that are no longer needed will be unbound.
Returns a set of bound ports
"""
LOG.debug('Repairing physnets in VPP')
self.vppf.fix_physnets(self.physnets)
LOG.debug('VPP has been cleaned of stale physnets')
return self.vppf.find_bound_ports()
def apply_spoof_macip(self, iface_idx, security_data, props):
"""Apply non-secgroup security to a port
This is an idempotent function to set up the port security
(antispoof and allowed-address-pair) that can be determined
solely from the data on the port itself.
"""
# TODO(ijw): this is a convenience for spotting L3 and DHCP
# ports, but it's not the right way
is_secured_port = props['bind_type'] == 'vhostuser'
port_security_enabled = security_data.get('port_security_enabled',
True)
# If (security-groups and port_security)
# are enabled and it's a vhostuser port
# proceed to set L3/L2 ACLs, else skip security
if (self.secgroup_enabled and
port_security_enabled and
is_secured_port):
# Set Allowed address pairs and mac-spoof filter
aa_pairs = security_data.get('allowed_address_pairs', [])
self.set_mac_ip_acl_on_port(
security_data['mac_address'],
security_data.get('fixed_ips'),
aa_pairs,
iface_idx)
else:
self.vppf.remove_mac_ip_acl_on_port(iface_idx)
def reconsider_port_secgroups(self):
"""Check current port security state.
See if any of the ports awaiting security group ACL population can
now be secured.
"""
# TODO(ijw): could be more efficient in selecting ports to check
for iface_idx in self.iface_awaiting_secgroups.keys():
self.maybe_apply_secgroups(iface_idx)
    def maybe_apply_secgroups(self, iface_idx):
        """Apply secgroups to a port if all constructs are available

        This is an idempotent function to set up port security. It
        relies on the pre-existence of the ACLs corresponding to
        security groups, so it may or may not be possible to apply
        security at this moment in time. If it is, the port is
        recorded as secure (allowing binding to complete), and if it
        isn't we will attempt to reapply as more security groups are
        created.

        It is reapplied if the security group list changes on the
        port. It is not reapplied if the security group content is
        changed, because the ACL number remains the same and therefore
        so does the port config.

        :param iface_idx: VPP interface index of the port; must have an
            entry in iface_awaiting_secgroups
        """
        # secgroup_ids is None when port security is disabled for the port
        secgroup_ids = self.iface_awaiting_secgroups[iface_idx]
        try:
            (id, bound_callback, props) = self.iface_state[iface_idx]
        except KeyError:  # The port was unbound before we could apply ACLs
            LOG.info("Interface idx %s unbound before "
                     "security-group(s) could be applied", iface_idx)
            self.iface_awaiting_secgroups.pop(iface_idx, None)
            return
        # TODO(ijw): this is a convenience for spotting L3 and DHCP
        # ports, but it's not the right way
        # (TODO(ijw) it's also the only reason we go to iface_state)
        is_secured_port = props['bind_type'] == 'vhostuser'
        # If security-groups are enabled and it's a port needing
        # security proceed to set L3/L2 ACLs, else skip security.
        # If security-groups are empty, apply the default spoof-acls.
        # This is the correct behavior when security-groups are enabled but
        # not set on a port.
        if (self.secgroup_enabled and
                secgroup_ids is not None and  # port security off
                is_secured_port):
            if not self.vppf.maybe_set_acls_on_port(
                    secgroup_ids,
                    iface_idx):
                # The ACLs for secgroups are not yet ready
                # Leave ourselves in the pending list
                return
        else:
            LOG.debug("Clearing port_security on "
                      "port %s", id)
            self.vppf.remove_acls_on_port(
                iface_idx)
        # Remove with no error if not present
        self.iface_awaiting_secgroups.pop(iface_idx, None)
        # Security is settled; the port may now be ready to come up
        self.maybe_up(iface_idx)
def _vhost_ready(self, id):
# The callback from VPP only knows the IP; convert
# .. and note that we may not know the conversion
iface_idx = self.iface_state_ifidx.get(id)
if iface_idx is None:
# Not a port we know about
return
self.maybe_up(iface_idx)
def maybe_up(self, iface_idx):
"""Flag that an interface is connected, if it is
This is a combination of 'we did our bit' and 'the other
end connected'. These can happen in either order; if
we resync, we recheck our binding but the other end
may have connected already.
This both tells Nova the interface is ready and brings the
interface up in VPP.
There is nothing wrong (other than a bit of inefficiency)
in sending this to Nova multiple times; the watching driver may
see the key write multiple times and will act accordingly.
"""
if iface_idx not in self.iface_state:
# Binding hasn't completed
return
(id, bound_callback, props) = self.iface_state[iface_idx]
# For trunk sub-ports, it's the parent vhostuser port that needs to
# be linked up
if 'parent_uuid' in props:
port_id = props['parent_uuid']
else:
port_id = id
if (props['bind_type'] == 'vhostuser' and
not self.vppf.vhostuser_linked_up(port_id)):
# vhostuser connection that hasn't yet found a friend
return
if iface_idx in self.iface_awaiting_secgroups:
return
LOG.debug('marking index %s as ready', id)
self.vppf.ifup(iface_idx)
bound_callback(id, props)
    def acl_add_replace(self, secgroup, data):
        """Add or replace a VPP ACL.

        Arguments:
        secgroup - OpenStack SecurityGroup ID
        data - SecurityGroup data from etcd
        """
        def _secgroup_rule(r):
            """Expand one etcd rule dict into SecurityGroupRule objects.

            A rule carrying remote_ip_addr expands to exactly one entry;
            a remote-group rule expands to one entry per known IP (of
            the matching IP version) of each port in the remote group.
            As a side effect, remote-group rules register this secgroup
            in vppf.remote_group_secgroups for later watch updates.
            """
            # Create a rule for the remote_ip_prefix (CIDR) value
            if r['remote_ip_addr']:
                remote_ip_prefixes = [(r['remote_ip_addr'],
                                       r['ip_prefix_len'])]
            # Create a rule for each ip address in the remote_group
            else:
                remote_group = r['remote_group_id']
                prefix_length = 128 if r['is_ipv6'] else 32
                ip_version = 6 if r['is_ipv6'] else 4
                # Add the referencing secgroup ID to the remote-group lookup
                # data set. This enables the RemoteGroupWatcher thread to
                # lookup the secgroups that need to be updated for a
                # remote-group etcd watch event
                self.vppf.remote_group_secgroups[remote_group].add(secgroup)
                remote_ip_prefixes = [
                    (ip, prefix_length) for port in
                    self.vppf.remote_group_ports[remote_group]
                    for ip in self.vppf.port_ips[port]
                    if ip_network(ip).version == ip_version]
                LOG.debug("remote_group: vppf.remote_group_ports:%s",
                          self.vppf.remote_group_ports
                          )
                LOG.debug("remote_group: vppf.port_ips:%s",
                          self.vppf.port_ips)
                LOG.debug("remote_group_ip_prefixes:%s for group %s",
                          remote_ip_prefixes, remote_group)
                LOG.debug("remote_group_secgroups: %s",
                          self.vppf.remote_group_secgroups)
            # VPP API requires the IP addresses to be represented in binary
            # At this point:
            # 1. we convert to the form VPP likes - a packed address
            # 2. we fix up the rule. At this point it's what Neutron gave us
            # and Neutron doesn't strictly check that the rule is a network
            # address compatible with the mask, but VPP cares. Our assumption
            # is that only bits significant relative to the mask are intended
            # to matter, though that's ill-defined in the Neutron API.
            rules = []
            for ip_addr, ip_prefix_len in remote_ip_prefixes:
                # OpenStack should provide a network address here, but
                # doesn't correctly validate input.
                net = ip_interface((ip_addr, int(ip_prefix_len),)).network
                packed_addr = net.network_address.packed
                rules.append(SecurityGroupRule(r['is_ipv6'],
                                               packed_addr,
                                               ip_prefix_len,
                                               r.get('remote_group_id', None),
                                               r['protocol'],
                                               r['port_min'],
                                               r['port_max']))
            return rules
        # Each rule may expand to several SecurityGroupRule entries
        ingress_rules, egress_rules = (
            [_secgroup_rule(r) for r in data['ingress_rules']],
            [_secgroup_rule(r) for r in data['egress_rules']]
        )
        # Flatten ingress and egress rules
        ingress_rules, egress_rules = (
            [rule for rule_list in ingress_rules for rule in rule_list],
            [rule for rule_list in egress_rules for rule in rule_list]
        )
        LOG.debug("remote_group: sec_group: %s, ingress rules: %s "
                  "egress_rules: %s", secgroup, ingress_rules, egress_rules)
        self.vppf.acl_add_replace_on_host(SecurityGroup(secgroup,
                                                        ingress_rules,
                                                        egress_rules))
def acl_delete(self, secgroup):
    """Remove the VPP ACL pair backing an OpenStack security group.

    Arguments:
    secgroup - OpenStack SecurityGroup ID
    """
    # The forwarder owns the ACL objects; delegate the removal to it.
    forwarder = self.vppf
    forwarder.acl_delete_on_host(secgroup)
def spoof_filter_on_host(self):
    """Install the anti-spoofing ingress and egress ACLs in VPP.

    The forwarder tags the ingress spoof ACL with ID FFFF:0 and the
    egress one with FFFF:1, and records the pair under the "FFFF" key
    as VppAcl(in_idx, out_idx) in its secgroup mapping.
    """
    # Pure delegation; all the work happens in the forwarder.
    forwarder = self.vppf
    forwarder.spoof_filter_on_host()
def set_mac_ip_acl_on_port(self, mac_address, fixed_ips,
                           allowed_address_pairs, sw_if_index):
    """Install the L2/L3 (MAC/IP) anti-spoof ACL on a port.

    Arguments:-
    mac_address - The mac_address assigned to the port
    fixed_ips - A list of dictionaries containing the fixed_ips
    assigned to the port identified by the key - 'ip_address'
    allowed_address_pairs - A list of allowed address pair attributes
    - Each address pair is a dict with
    keys: ip_address (required)
    mac_address (optional)
    sw_if_index - VPP vhostuser if_idx
    """
    # mac-ip ACLs filter egress traffic from the instance.  Always
    # permit DHCP discovery (source 0.0.0.0) and the IPv6 link-local
    # address so neighbour discovery keeps working.
    lla_address = str(netutils.get_ipv6_addr_by_EUI64(
        n_const.IPv6_LLA_PREFIX, mac_address))
    permitted = [(mac_address, u'0.0.0.0'),
                 (mac_address, lla_address)]
    # Pair the port MAC with each of the port's fixed IP addresses.
    permitted.extend((mac_address, ip['ip_address']) for ip in fixed_ips)
    # Allowed-address pairs may carry their own MAC; fall back to the
    # port MAC when one isn't supplied.
    permitted.extend(
        (pair.get('mac_address', mac_address), pair['ip_address'])
        for pair in allowed_address_pairs)
    self.vppf.set_mac_ip_acl_on_vpp_port(permitted, sw_if_index)
def load_macip_acl_mapping(self) -> None:
    """Load the sw_if_index to mac_ip_acl index mappings on vpp.

    Populates self.vppf.port_vpp_acls :
        {sw_if_index -> {'l23' : <macip_acl_index>}}
    """
    # VPP reports an all-ones index for interfaces that have no
    # MAC/IP ACL attached; those entries must be skipped.
    NO_ACL = 0xffffffff  # == 4294967295
    try:
        macip_acls = self.vppf.vpp.get_macip_acls()
        # The acl's position in the returned list is the sw_if_index
        for sw_if_index, acl_index in enumerate(macip_acls):
            if acl_index != NO_ACL:
                self.vppf.port_vpp_acls[sw_if_index]['l23'] = acl_index
    except ValueError:
        pass  # vpp_papi throws this error when no ACLs exist
    except AttributeError:
        pass  # cannot reference acl attribute - pass and exit
def update_remote_group_secgroups(self, remote_group):
    """Update the ACLs of all security groups that use a remote-group.

    When a remote_group to port association is changed,
    i.e. A new port is associated with (or) an existing port is removed,
    the agent needs to update the VPP ACLs belonging to all the
    security groups that use this remote-group in their rules.

    Since this is called from various threads it makes a new etcd
    client each call.
    """
    # remote_group_secgroups: remote-group ID -> set of secgroup IDs
    # whose rules reference it (maintained by acl_add_replace).
    secgroups = self.vppf.remote_group_secgroups[remote_group]
    LOG.debug("Updating secgroups:%s referencing the remote_group:%s",
              secgroups, remote_group)
    etcd_client = self.client_factory.client()
    etcd_writer = etcdutils.json_writer(etcd_client)
    for secgroup in secgroups:
        secgroup_key = self.secgroup_key_space + "/%s" % secgroup
        # TODO(najoy):Update to the new per thread etcd-client model
        # TODO(ijw): all keys really present?
        # If the security group is deleted before the agent gets to it,
        # handle the exception.
        try:
            # Re-read the secgroup's rules and rebuild its VPP ACLs so
            # the remote-group IP changes take effect.
            data = etcd_writer.read(secgroup_key).value
            LOG.debug("Updating remote_group rules %s for secgroup %s",
                      data, secgroup)
            self.acl_add_replace(secgroup, data)
        except etcd.EtcdKeyNotFound:
            pass
# EtcdListener Trunking section
def reconsider_trunk_subports(self):
    """Try to bind subports awaiting their parent port to be bound.

    If the parent port
    - is bound
    - instance has connected to the other end of the vhostuser
    - security groups has been applied
    - is in admin UP state
    then:
    - bind the subports, and
    - set subport state to admin UP
    """
    # Get the list of *currently* awaiting subports
    # (allows us to change and clear up the dict as we go through them)
    awaiting_subports = list(self.subports_awaiting_parents.items())
    for parent_port, subports in awaiting_subports:
        LOG.debug('reconsidering bind for trunk subports %s, parent %s',
                  subports, parent_port)
        props = self.vppf.interfaces.get(parent_port, None)
        # Make sure parent port is really ready: it must be bound
        # (present in iface_state), its vhostuser link must be up, and
        # its secgroups must already have been applied.
        if (props and props['iface_idx'] in self.iface_state and
                self.vppf.vhostuser_linked_up(parent_port) and
                props['iface_idx'] not in self.iface_awaiting_secgroups):
            LOG.debug("Parent trunk port vhostuser ifidx %s is ready",
                      props['iface_idx'])
            self.bind_unbind_subports(parent_port, subports)
            # Done waiting; drop the parent from the pending dict.
            self.subports_awaiting_parents.pop(parent_port)
        else:
            # Leave it queued; the TrunkWatcher tick retries later.
            LOG.debug("Parent trunk port is not ready")
def subports_to_unbind(self, parent_port, subports):
    """Work out which bound subports are no longer wanted.

    subports :- A set of subports that need to be currently bound
    to the parent port.

    Returns the currently-bound subports absent from that set.
    """
    currently_bound = self.bound_subports[parent_port]
    return {port for port in currently_bound if port not in subports}
def subports_to_bind(self, parent_port, subports):
    """Return the subports that still need binding for a parent port.

    subports :- A set of subports that need to be currently bound
    to the parent port.

    (Docstring previously said "unbind" - a copy-paste of
    subports_to_unbind's; this method returns the ports to *bind*.)
    """
    # remove ports from subports that are already bound and only bind
    # the new ports.
    return subports - self.bound_subports[parent_port]
def bind_unbind_subports(self, parent_port, subports):
    """Bind or unbind the subports of the parent ports as needed.

    To unbind all bound subports of a parent port, provide the
    parent_port argument with subports set to an empty list.

    Sample subports data structure: List of dicts
    [{"segmentation_id": 11,
      "uplink_seg_id": 149,
      "segmentation_type": "vlan",
      "uplink_seg_type": "vlan",
      "port_id": "9ee91c37-9150-49ff-9ea7-48e98547771a",
      "physnet": "physnet1",
      "allowed_address_pairs": [],
      "port_security_enabled": true,
      "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"],
      "bound_callback" : bind_notifier_object,
      },
     {"segmentation_id": 12,
      "uplink_seg_id": 139,
      "segmentation_type": "vlan",
      "uplink_seg_type": "vlan",
      "port_id": "2b1a89ba-78f1-4350-b71a-7caf7f23cbcf",
      "physnet": "physnet1",
      "allowed_address_pairs": [],
      "port_security_enabled": true,
      "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"],
      "bound_callback" : bind_notifier_object,
      }
     ]
    """
    LOG.debug('Binding or Unbinding subports %s of parent trunk port %s',
              subports, parent_port)
    # Diff the desired subport set against what's already bound.
    subport_set = set([p['port_id'] for p in subports])
    subports_to_bind = self.subports_to_bind(parent_port, subport_set)
    LOG.debug('Binding subports %s of a parent trunk port %s',
              subports_to_bind, parent_port)
    subports_to_unbind = self.subports_to_unbind(parent_port,
                                                 subport_set)
    LOG.debug('Unbinding subports %s of a parent trunk port %s',
              subports_to_unbind, parent_port)
    # bind subports we are told to bind
    for subport in subports_to_bind:
        # Recover the full dict for this port_id from the input list.
        subport_data = [p for p in subports
                        if p['port_id'] == subport][0]
        LOG.debug('Binding subport %s of parent trunk port %s '
                  'sub_port_data %s',
                  subport, parent_port, subport_data)
        props = self.vppf.bind_subport_on_host(parent_port, subport_data)
        # Bring up the subport
        if props:
            self.bound_subports[parent_port].add(subport)
            subport_iface_idx = props['iface_idx']
            LOG.debug("Bringing up the trunk subport vhost ifidx %s",
                      subport_iface_idx)
            self.vppf.ifup(subport_iface_idx)
            # Set port security on subport
            LOG.debug("Setting port security on trunk subport ifidx %s",
                      subport_iface_idx)
            port_security_enabled = subport_data.get(
                'port_security_enabled',
                True)
            # A None entry means "no secgroups to apply" (port security
            # disabled); a list queues the groups for later application.
            if port_security_enabled:
                self.iface_awaiting_secgroups[subport_iface_idx] = \
                    subport_data.get('security_groups', [])
            else:
                self.iface_awaiting_secgroups[subport_iface_idx] = None
            id = subport_data['port_id']
            # Track state so bind-complete notifications can be sent.
            self.iface_state[subport_iface_idx] = (
                id,
                subport_data['bound_callback'],
                props
            )
            self.iface_state_ifidx[id] = subport_iface_idx
            self.apply_spoof_macip(subport_iface_idx, subport_data, props)
            self.maybe_apply_secgroups(subport_iface_idx)
    # unbind subports we are told to unbind
    for subport in subports_to_unbind:
        LOG.debug('Unbinding subport %s of parent_port %s',
                  subport, parent_port)
        # Drop the bookkeeping first, then tear down the VPP side.
        if self.iface_state_ifidx[subport] in self.iface_state:
            del self.iface_state[self.iface_state_ifidx[subport]]
        del self.iface_state_ifidx[subport]
        self.vppf.unbind_subport_on_host(subport)
        self.bound_subports[parent_port].remove(subport)
# Watcher heartbeat period; the etcd "alive" key TTL is 3x this value.
AGENT_HEARTBEAT = 60  # seconds
def process_ops(self):
    """Set up the agent's etcd keyspaces and spawn all watcher threads.

    Blocks forever waiting on the greenthread pool; this is the
    agent's main loop entry point.
    """
    # TODO(ijw): needs to remember its last tick on reboot, or
    # reconfigure from start (which means that VPP needs it
    # storing, so it's lost on reboot of VPP)

    # Per-host keyspaces the mechanism driver writes for this node,
    # plus the global secgroup/remote-group spaces shared by all nodes.
    self.port_key_space = LEADIN + "/nodes/%s/ports" % self.host
    self.router_key_space = LEADIN + "/nodes/%s/routers" % self.host
    self.secgroup_key_space = LEADIN + "/global/secgroups"
    self.state_key_space = LEADIN + "/state/%s/ports" % self.host
    self.physnet_key_space = LEADIN + "/state/%s/physnets" % self.host
    self.remote_group_key_space = LEADIN + "/global/remote_group"
    self.trunk_key_space = LEADIN + "/nodes/%s/trunks" % self.host

    etcd_client: Optional[etcd.Client]
    etcd_helper: Optional[etcdutils.EtcdHelper]

    etcd_client = self.client_factory.client()
    etcd_helper = etcdutils.EtcdHelper(etcd_client)
    # We need certain directories to exist so that we can write to
    # and watch them
    etcd_helper.ensure_dir(self.port_key_space)
    etcd_helper.ensure_dir(self.secgroup_key_space)
    etcd_helper.ensure_dir(self.state_key_space)
    etcd_helper.ensure_dir(self.physnet_key_space)
    etcd_helper.ensure_dir(self.router_key_space)
    etcd_helper.ensure_dir(self.remote_group_key_space)
    etcd_helper.ensure_dir(self.trunk_key_space)

    etcd_helper.clear_state(self.state_key_space)

    # py3 note: in py3 keys() does not return a list but the following
    # seems to work fine. Enclose in list() is problems arise.
    physnets = self.physnets.keys()
    etcd_helper.clear_state(self.physnet_key_space)
    # Advertise the physnets this host supports (value is a dummy 1).
    for f in physnets:
        etcd_client.write(self.physnet_key_space + '/' + f, 1)

    # We need to be wary not to hand the same client to multiple threads;
    # this etcd_helper and client dies here
    etcd_helper = None
    etcd_client = None

    # load sw_if_index to macip acl index mappings
    self.load_macip_acl_mapping()

    self.binder = BindNotifier(self.client_factory, self.state_key_space)
    self.pool.spawn(self.binder.run)

    if self.secgroup_enabled:
        LOG.debug("loading VppAcl map from acl tags for "
                  "performing secgroup_watcher lookups")
        known_secgroup_ids = self.vppf.populate_secgroup_acl_mappings()
        LOG.debug("Adding ingress/egress spoof filters "
                  "on host for secgroup_watcher spoof blocking")
        self.spoof_filter_on_host()

        LOG.debug("Spawning secgroup_watcher..")
        self.pool.spawn(SecGroupWatcher(self.client_factory.client(),
                                        'secgroup_watcher',
                                        self.secgroup_key_space,
                                        known_secgroup_ids,
                                        heartbeat=self.AGENT_HEARTBEAT,
                                        data=self).watch_forever)
        self.pool.spawn(RemoteGroupWatcher(self.client_factory.client(),
                                           'remote_group_watcher',
                                           self.remote_group_key_space,
                                           heartbeat=self.AGENT_HEARTBEAT,
                                           data=self).watch_forever)

    # The security group watcher will load the secgroups before
    # this point (before the thread is spawned) - that's helpful,
    # because it means that the ports will be immediately createable
    # as the secgroups are already available.

    LOG.debug("Spawning port_watcher")
    self.pool.spawn(PortWatcher(self.client_factory.client(),
                                'port_watcher',
                                self.port_key_space,
                                heartbeat=self.AGENT_HEARTBEAT,
                                data=self).watch_forever)

    # Spawn trunk watcher if enabled
    if 'vpp-trunk' in cfg.CONF.service_plugins:
        LOG.debug("Spawning trunk_watcher")
        self.pool.spawn(TrunkWatcher(self.client_factory.client(),
                                     'trunk_watcher',
                                     self.trunk_key_space,
                                     heartbeat=TRUNK_WATCHER_HEARTBEAT,
                                     data=self).watch_forever)

    # Spawn GPE watcher for GPE tenant networks
    if self.gpe_listener is not None:
        self.gpe_listener.spawn_watchers(self.pool,
                                         self.AGENT_HEARTBEAT,
                                         self)

    # Spawning after the port bindings are done so that
    # the RouterWatcher doesn't do unnecessary work
    if 'vpp-router' in cfg.CONF.service_plugins:
        if cfg.CONF.ml2_vpp.enable_l3_ha:
            LOG.info("L3 HA is enabled")
        LOG.debug("Spawning router_watcher")
        self.pool.spawn(RouterWatcher(self.client_factory.client(),
                                      'router_watcher',
                                      self.router_key_space,
                                      heartbeat=self.AGENT_HEARTBEAT,
                                      data=self).watch_forever)

    # Block forever; the watchers do all further work.
    self.pool.waitall()
class PortWatcher(etcdutils.EtcdChangeWatcher):
    """Watch this node's port keyspace and bind/unbind ports in VPP.

    Also maintains the per-host "alive" key in etcd so the server side
    can tell the agent is running.
    """

    def __init__(self, *args, **kwargs):
        super(PortWatcher, self).__init__(*args, **kwargs)
        # Announce liveness immediately; do_tick keeps it refreshed.
        self.etcd_client.write(LEADIN + '/state/%s/alive' %
                               self.data.host,
                               1, ttl=3 * self.heartbeat)

    def do_tick(self):
        # The key that indicates to people that we're alive
        # (not that they care)
        self.etcd_client.refresh(LEADIN + '/state/%s/alive' %
                                 self.data.host,
                                 ttl=3 * self.heartbeat)

    def init_resync_start(self):
        """Identify known ports in VPP

        We are beginning a resync because the agent has
        restarted.  We should be fixing VPP with the least
        disruption possible so that traffic being passed by VPP
        on currently configured ports is not disrupted.  As such,
        this goes to find correctly configured ports (which -
        if still required - will be left alone) and removes
        structures that have been partially or incorrectly set up.
        """
        self.expected_keys = self.data.vpp_restart_prepare()

    def removed(self, port):
        # Removing key == desire to unbind
        try:
            is_gpe = False
            port_data = self.data.vppf.interfaces[port]
            port_net = port_data['net_data']
            is_gpe = port_net['network_type'] == TYPE_GPE \
                and self.data.gpe_listener is not None
            if is_gpe:
                # Get seg_id and mac to delete any gpe mappings
                seg_id = port_net['segmentation_id']
                mac = port_data['mac']
        except KeyError:
            # On initial resync, this information may not
            # be available; also, the network may not
            # be gpe
            if is_gpe:
                LOG.warning('Unable to delete GPE mappings for port')
        self.data.unbind(port)
        # Unlike bindings, unbindings are immediate.
        try:
            self.etcd_client.delete(
                self.data.state_key_space + '/%s'
                % port)
            # NOTE(review): if the KeyError above fired after is_gpe
            # became True, seg_id/mac may be unbound here, which would
            # raise NameError - looks like a latent bug; confirm.
            if is_gpe:
                self.data.gpe_listener.delete_etcd_gpe_remote_mapping(
                    seg_id, mac)
        except etcd.EtcdKeyNotFound:
            # Gone is fine; if we didn't delete it
            # it's no problem
            pass

    def added(self, port, value):
        # Create or update == bind
        # In EtcdListener, bind *ensures correct
        # binding* and is idempotent.  It will also
        # fix up security if the security state has
        # changed.  NB most things will not change on
        # an update.
        data = jsonutils.loads(value)

        # For backward comatibility reasons, 'plugtap' now means 'tap'
        # Post-17.07 'tap' is used, but this allows compatibility with
        # previously stored information in etcd.
        binding_type = data['binding_type']
        if binding_type == 'plugtap':
            binding_type = 'tap'

        self.data.bind(
            self.data.binder.add_notification,
            port,
            binding_type,
            # NB(onong): VM's mac is needed to be programmed as the lisp local
            # eid for data flow in gpe networks across compute nodes so please
            # do not change the line below without proper consideration.
            data['mac_address'],
            data['physnet'],
            data['network_type'],
            data['segmentation_id'],
            data  # TODO(ijw) convert incoming to security fmt
        )
        # While the bind might fail for one reason or another,
        # we have nothing we can do at this point.  We simply
        # decline to notify Nova the port is ready.

        # For GPE networks,
        # write the remote mapping data to etcd to
        # propagate both the mac to underlay mapping and
        # mac to instance's IP (for ARP) mapping to all
        # agents that bind this segment using GPE
        if data['network_type'] == TYPE_GPE \
                and self.data.gpe_listener is not None:
            # NB(onong): The VM's mac needs to be programmed in the remote
            # mappings. Without this no communication is possible between VMs
            # running on separate compute nodes.
            mac = data['mac_address']
            for ip in [ip['ip_address'] for ip in data.get('fixed_ips')]:
                self.data.gpe_listener.add_etcd_gpe_remote_mapping(
                    data['segmentation_id'], mac, ip)
class RouterWatcher(etcdutils.EtcdChangeWatcher):
    """Start an etcd watcher for router operations.

    Starts an etcd watcher on the /router directory for
    this node. This watcher is responsible for consuming
    Neutron router CRUD operations.
    """
    # TODO(ijw): consider how to remove GPE references from the router
    # code, as they *should* be dealt with by port binding functions.

    def do_tick(self):
        # No periodic work for this watcher.
        pass

    def parse_key(self, router_key):
        """Parse the key into two tokens and return a tuple.

        The returned tuple is denoted by (token1, token2).
        If token1 == "floatingip", then token2 is the ID of the
        floatingip that is added or removed on the server.
        If, token1 == router_ID and token2 == port_ID of the router
        interface that is added or removed.
        If, token1 == 'ha', then we return that token for router watcher
        to action.

        NOTE(review): the regex needs a "token1/token2" shape, so a
        bare 'ha' key actually yields (None, None) here; the 'ha' key
        is matched directly in added() - confirm the docstring intent.
        """
        m = re.match('([^/]+)' + '/([^/]+)', router_key)
        floating_ip, router_id, port_id = None, None, None
        if m and m.group(1) and m.group(2):
            if m.group(1) == 'floatingip':
                floating_ip = m.group(2)
                return ('floatingip', floating_ip)
            else:
                router_id = m.group(1)
                port_id = m.group(2)
                return (router_id, port_id)
        else:
            return (None, None)

    def add_remove_gpe_mappings(self, port_id, router_data, is_add=1):
        """Add a GPE mapping to the router's loopback mac-address."""
        # External (gateway) and internal interfaces are tracked in
        # separate vppf maps; pick the right one for the MAC lookup.
        if router_data.get('external_gateway_info', False):
            loopback_mac = self.data.vppf.router_external_interfaces[
                port_id]['mac_address']
        else:
            loopback_mac = self.data.vppf.router_interfaces[
                port_id]['mac_address']
        # GPE remote mappings are added for only the master L3 router,
        # if ha_enabled
        ha_enabled = cfg.CONF.ml2_vpp.enable_l3_ha
        if is_add:
            if (ha_enabled and self.data.vppf.router_state) or not ha_enabled:
                self.data.gpe_listener.add_etcd_gpe_remote_mapping(
                    router_data['segmentation_id'],
                    loopback_mac,
                    router_data['gateway_ip'])
        else:
            self.data.gpe_listener.delete_etcd_gpe_remote_mapping(
                router_data['segmentation_id'],
                loopback_mac)

    def added(self, router_key, value):
        # Router interface / floating IP create-or-update events.
        token1, token2 = self.parse_key(router_key)
        if token1 and token2:
            if token1 != 'floatingip':
                port_id = token2
                router_data = jsonutils.loads(value)
                self.data.vppf.ensure_router_interface_on_host(
                    port_id, router_data)
                self.data.vppf.maybe_associate_floating_ips()
                if router_data.get('net_type') == TYPE_GPE:
                    self.add_remove_gpe_mappings(port_id, router_data,
                                                 is_add=1)
            else:
                floating_ip = token2
                floatingip_dict = jsonutils.loads(value)
                self.data.vppf.associate_floatingip(floating_ip,
                                                    floatingip_dict)
        # The bare 'ha' key carries this node's HA role as a boolean.
        if cfg.CONF.ml2_vpp.enable_l3_ha and router_key == 'ha':
            LOG.debug('Setting VPP-Router HA State..')
            router_state = bool(jsonutils.loads(value))
            LOG.debug('Router state is: %s', router_state)
            # Become master if a state is True, else become backup
            state = 'MASTER' if router_state else 'BACKUP'
            LOG.debug('VPP Router HA state has become: %s', state)
            self.data.vppf.router_state = router_state
            if router_state:
                self.data.vppf.become_master_router()
            else:
                self.data.vppf.become_backup_router()
            # Update remote mappings for GPE bound router ports
            if self.data.gpe_listener:
                self.data.gpe_listener.update_router_gpe_mappings()

    def removed(self, router_key):
        # Router interface / floating IP delete events.
        token1, token2 = self.parse_key(router_key)
        if token1 and token2:
            if token1 != 'floatingip':
                port_id = token2
                router_data = self.data.vppf.router_interfaces.get(port_id)
                # Delete the GPE mapping first as we need to lookup the
                # router interface mac-address from vppf
                if router_data and router_data.get('net_type') == TYPE_GPE:
                    self.add_remove_gpe_mappings(port_id, router_data,
                                                 is_add=0)
                self.data.vppf.delete_router_interface_on_host(port_id)
            else:
                floating_ip = token2
                self.data.vppf.disassociate_floatingip(floating_ip)
class SecGroupWatcher(etcdutils.EtcdChangeWatcher):
    """Watch the global secgroup keyspace and mirror it into VPP ACLs."""

    def __init__(self, etcd_client, name, watch_path,
                 known_keys,
                 **kwargs):
        # Remember which ACL keys are already configured in VPP so the
        # initial resync doesn't have to rebuild every one of them.
        self.known_keys = known_keys
        super(SecGroupWatcher, self).__init__(
            etcd_client, name, watch_path, **kwargs)

    def do_tick(self):
        # This watcher has no periodic work.
        pass

    def init_resync_start(self):
        # TODO(ijw): we should probably do the secgroup work
        # here rather than up front
        return self.known_keys

    def added(self, secgroup, value):
        # A created or updated secgroup becomes a VPP add-replace ACL;
        # ports waiting on this group can then be reconsidered.
        spec = jsonutils.loads(value)
        self.data.acl_add_replace(secgroup, spec)
        self.data.reconsider_port_secgroups()

    def removed(self, secgroup):
        # A deleted secgroup becomes a VPP ACL delete.
        self.data.acl_delete(secgroup)
class RemoteGroupWatcher(etcdutils.EtcdChangeWatcher):
    """Details on how the remote-group-id rules are updated by the vpp-agent.

    This thread watches the remote-group key space.
    When VM port associations to security groups are updated, this thread
    receives an etcd watch event from the server. From the watch event,
    the thread figures out the set of ports associated with the
    remote-group-id and the IP addresses of each port.
    After this, this thread updates two data structures.
    The first one is a dictionary named port_ips, used to keep track of
    the ports to their list of IP addresses. It has the port UUID as the key,
    and the value is it's set of IP addresses. The second DS is a dict named
    remote_group_ports. This is used to keep track of port memberships in
    remote-groups. The key is the remote_group_id and the value is the set of
    ports associated with it. These two dictionaries are updated by the thread
    whenever watch events are received, so the agent always has up to date
    information on ports, their IPs and the remote-groups association.
    The RemoteGroupWatcher thread then calls a method named
    update_remote_group_secgroups with the remote_group_id as the argument.
    This method figures out which secgroups need to be updated as a result of
    the watch event. This is done by looking up another dict named
    remote_group_secgroups that keeps track of all the secgroups that are
    referencing the remote-group-id inside their rules.
    The key is the remote-group, and the value is the set of secgroups that
    are dependent on it.
    The update_remote_group_secgroups method then reads the rules for each of
    these referencing security-groups and sends it to the method named
    acl_add_replace with the security-group-uuid and rules as the argument.The
    acl_add_replace method takes each rule that contains the remote-group-id
    and computes a product using the list of IP addresses belonging to all
    the ports in the remote-group. It then calls the acl_add_replace method
    in vppf to atomically update the relevant VPP ACLs for the security-group.
    """

    def do_tick(self):
        # No periodic work for this watcher.
        pass

    def parse_key(self, remote_group_key):
        # Split "remote_group_id/port_id"; either element is None if
        # the key doesn't match that shape.
        m = re.match('([^/]+)' + '/([^/]+)', remote_group_key)
        remote_group_id, port_id = None, None
        if m:
            remote_group_id = m.group(1)
            port_id = m.group(2)
        return (remote_group_id, port_id)

    def added(self, remote_group_key, value):
        # remote_group_key format is "remote_group_id/port_id"
        # Value is a list of IP addresses
        remote_group_id, port_id = self.parse_key(remote_group_key)
        if value and remote_group_id and port_id:
            ip_addrs = jsonutils.loads(value)
            # The set of IP addresses configured on a port
            self.data.vppf.port_ips[port_id] = set(ip_addrs)
            # The set of ports in a security-group
            self.data.vppf.remote_group_ports[remote_group_id].update(
                [port_id])
            LOG.debug("Current remote_group_ports: %s port_ips: %s",
                      self.data.vppf.remote_group_ports,
                      self.data.vppf.port_ips)
            # Rebuild the ACLs of every secgroup referencing this group.
            self.data.update_remote_group_secgroups(remote_group_id)

    def removed(self, remote_group_key):
        remote_group_id, port_id = self.parse_key(remote_group_key)
        if remote_group_id and port_id:
            # Remove the port_id from the remote_group
            self.data.vppf.remote_group_ports[
                remote_group_id].difference_update([port_id])
            LOG.debug("Current remote_group_ports: %s port_ips: %s",
                      self.data.vppf.remote_group_ports,
                      self.data.vppf.port_ips)
            self.data.update_remote_group_secgroups(remote_group_id)
class TrunkWatcher(etcdutils.EtcdChangeWatcher):
    """Watches trunk parent/subport bindings on the host and takes actions.

    Trunk keyspace format.
    /networking-vpp/nodes/<node-name>/trunks/<UUID of the trunk>

    Sample data format:
    {"status": "ACTIVE",
     "name": "trunk-new",
     "admin_state_up": true,
     "sub_ports": [
         {"segmentation_id": 11,
          "uplink_seg_id": 149,
          "segmentation_type": "vlan",
          "uplink_seg_type": "vlan",
          "port_id": "9ee91c37-9150-49ff-9ea7-48e98547771a",
          "physnet": "physnet1",
          "allowed_address_pairs": [],
          "mac_address": "fa:16:3e:c4:80:dc",
          "port_security_enabled": true,
          "fixed_ips": [{"subnet_id": "05cfd12c-9db8-4f55-a2b9-aca89f412932",
                         "ip_address": "10.110.110.7"}],
          "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"]},
         {"segmentation_id": 12,
          "uplink_seg_id": 139,
          "segmentation_type": "vlan",
          "uplink_seg_type": "vlan",
          "port_id": "2b1a89ba-78f1-4350-b71a-7caf7f23cbcf",
          "physnet": "physnet1",
          "allowed_address_pairs": [],
          "mac_address": "fa:17:3e:c4:80:de",
          "port_security_enabled": true,
          "fixed_ips": [{"subnet_id": "05cfd12c-9db8-4f55-a2b9-aca89f412932",
                         "ip_address": "10.110.110.8"}],
          "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"]},
         ]}

    How does it work?
    The ml2 server:
    1) Writes above etcd key/value when a trunk port is bound on the host.
    2) Updates the above value when subports on a bound trunk are updated.
    3) Deletes the key when the trunk is unbound.
    The trunkwatcher receives the watch event and it figures out whether
    it should perform a bind or unbind action on the parent and its subport
    and performs it.
    """

    def do_tick(self):
        """Invoked every TRUNK_WATCHER_HEARTBEAT secs"""
        # Check if there are child ports to be bound and brought UP
        self.data.reconsider_trunk_subports()

    def added(self, parent_port, value):
        """Bind and unbind sub-ports of the parent port."""
        data = jsonutils.loads(value)
        LOG.debug('trunk watcher received add for parent_port %s '
                  'with data %s', parent_port, data)
        # Due to out-of-sequence etcd watch events during an agent restart,
        # we do not yet know at this point whether the parent port is setup.
        # So, we'll add it to the awaiting parents queue and reconsider it.
        subport_data = data['sub_ports']
        # Subports don't get per-port bind notifications; use a no-op
        # callback to satisfy the binding machinery.
        for subport in subport_data:
            subport['bound_callback'] = lambda *args: None
        self.data.subports_awaiting_parents[parent_port] = subport_data
        # reconsider awaiting sub_ports
        self.data.reconsider_trunk_subports()

    def removed(self, parent_port):
        """Unbind all sub-ports and then unbind the parent port."""
        LOG.debug('trunk watcher received unbound for parent port %s ',
                  parent_port)
        # First, unbind all subports
        self.data.bind_unbind_subports(parent_port, subports=[])
        # Then, unbind the parent port if it has no subports
        if not self.data.bound_subports[parent_port]:
            LOG.debug('Unbinding the parent port %s', parent_port)
            self.data.vppf.unbind_interface_on_host(parent_port)
class BindNotifier(object):
    """A thread to return bind-complete notifications to the server.

    This notifies the completion of a bind by writing a state key with
    the details of VPP's config (the other end doesn't care about the
    content, only the key's presence, so this is purely a debugging
    issue) to etcd.
    """

    def __init__(self, client_factory, state_key_space):
        # An infinite queue over which we receive notifications
        self.notifications = eventlet.queue.Queue()

        self.state_key_space = state_key_space

        self.etcd_client = client_factory.client()
        self.etcd_writer = etcdutils.json_writer(self.etcd_client)

    def add_notification(self, id, content):
        """Queue a notification for sending to Nova

        Nova watches a key's existence before sending out bind events.
        We set the key, and use the value to store debugging
        information.
        """
        self.notifications.put((id, content,))

    def run(self):
        """Drain the queue, writing one state key per notification.

        Runs forever; failures are logged and the notification is
        requeued for a later retry.
        """
        while True:
            # Track the current item so the retry path never touches
            # unbound names (previously 'port'/'props' could be
            # referenced in the except block before assignment,
            # raising NameError).
            ent = None
            try:
                ent = self.notifications.get()
                (port, props) = ent

                # TODO(ijw): do we ever clean this space up?
                self.etcd_writer.write(
                    self.state_key_space + '/%s' % port,
                    props)
            except Exception:
                # We must keep running, but we don't expect problems
                LOG.exception("exception in bind-notify thread")

                # If there are problems, retry the notification later.
                # There's no issue if we do this multiple times.
                if ent is not None:
                    self.add_notification(ent[0], ent[1])
class VPPRestart(object):
    """Restart the VPP service and allow time for it to come back up.

    NOTE: constructing an instance performs the restart immediately,
    as root, via the configured root helper.
    """

    def __init__(self):
        self.timeout = 10  # VPP connect timeout in seconds
        LOG.debug("Agent is restarting VPP")
        utils.execute(['service', 'vpp', 'restart'], run_as_root=True)

    def wait(self):
        # Fixed sleep rather than a health check.
        time.sleep(self.timeout)  # TODO(najoy): check if vpp is actually up
def openstack_base_setup(process_name):
    """General purpose entrypoint

    Sets up non-specific bits (the integration with OpenStack and its
    config, and so on).

    :param process_name: logging identity for this process
    """
    # Arguments, config files and options
    cfg.CONF(sys.argv[1:])

    # General logging
    logging.setup(cfg.CONF, process_name)

    # Guru meditation support enabled: dumps a state report on signal.
    gmr_opts.set_defaults(cfg.CONF)
    gmr.TextGuruMeditation.setup_autorun(
        version.version_info,
        service_name='vpp-agent')
def main():
    """Main function for VPP agent functionality."""
    openstack_base_setup('vpp_agent')

    setup_privsep()

    # Register all the option groups this process reads.
    neutron.conf.plugins.ml2.config.register_ml2_plugin_opts(cfg.CONF)
    neutron.conf.agent.securitygroups_rpc.register_securitygroups_opts(
        cfg.CONF)
    config_opts.register_vpp_opts(cfg.CONF)

    # Pull physnets out of config and interpret them
    if not cfg.CONF.ml2_vpp.physnets:
        LOG.critical("Missing physnets config. Exiting...")
        sys.exit(1)

    # Config format: "physnet1:iface1, physnet2:iface2"
    physnet_list = cfg.CONF.ml2_vpp.physnets.replace(' ', '').split(',')
    physnets = {}
    for f in physnet_list:
        if f:
            try:
                (k, v) = f.split(':')
            except Exception:
                LOG.error("Could not parse physnet to interface mapping "
                          "check the format in the config file: "
                          "physnets = physnet1:<interface1>, "
                          "physnet2:<interface>")
                sys.exit(1)
            # NOTE(review): this checks the *interface* name length but
            # reports it as the physnet name - looks like it may have
            # been meant to check 'k'; confirm intent.
            if len(v) > MAX_PHYSNET_LENGTH:
                LOG.error("Physnet '%(physnet_name)s' is longer than "
                          "%(len)d characters.",
                          {'physnet_name': v, 'len': MAX_PHYSNET_LENGTH})
                sys.exit(1)
            physnets[k] = v

    # Deal with VPP-side setup
    if cfg.CONF.ml2_vpp.enable_vpp_restart:
        VPPRestart().wait()

    # Convert to the minutes unit that VPP uses:
    # (we round *up*)
    # py3 note: using // since we want integer division
    mac_age_min = int((cfg.CONF.ml2_vpp.mac_age + 59) // 60)
    vppf = VPPForwarder(physnets,
                        mac_age=mac_age_min,
                        vpp_cmd_queue_len=cfg.CONF.ml2_vpp.vpp_cmd_queue_len
                        )

    # Deal with etcd-side setup
    LOG.debug("Using etcd host:%s port:%s user:%s password:***",
              cfg.CONF.ml2_vpp.etcd_host,
              cfg.CONF.ml2_vpp.etcd_port,
              cfg.CONF.ml2_vpp.etcd_user)
    client_factory = etcdutils.EtcdClientFactory(cfg.CONF.ml2_vpp)

    # Do the work
    ops = EtcdListener(cfg.CONF.host, client_factory, vppf, physnets)

    # Optionally run agent extensions before entering the main loop.
    names = cfg.CONF.ml2_vpp.vpp_agent_extensions
    if names != '':
        mgr = ExtensionManager(
            'networking_vpp.vpp_agent.extensions',
            names,
            VPPAgentExtensionBase)

        mgr.call_all('run', cfg.CONF.host, client_factory, vppf, ops.pool)

    # Blocks forever.
    ops.process_ops()


if __name__ == '__main__':
    main()
|
<filename>networking_vpp/agent/server.py
# Copyright (c) 2017 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Note that it does *NOT* at this point have a persistent database, so
# restarting this process will make net-vpp forget about every port it's
# learned, which will not do your system much good (the data is in the
# global 'backends' and 'ports' objects). This is for simplicity of
# demonstration; we have a second codebase already defined that is
# written to OpenStack endpoint principles and includes its ORM, so
# that work was not repeated here where the aim was to get the APIs
# worked out. The two codebases will merge in the future.
from __future__ import absolute_import
# eventlet must be monkey patched early or we confuse urllib3.
import eventlet
eventlet.monkey_patch()
import binascii
from collections import defaultdict
from collections import namedtuple
import etcd
import eventlet.semaphore
from ipaddress import ip_address, ip_interface, ip_network
import os
import re
import shlex
import sys
import time
from networking_vpp.agent import gpe
from networking_vpp.agent import network_interface
from networking_vpp.agent import vpp
from networking_vpp import config_opts
from networking_vpp import constants as nvpp_const
from networking_vpp import etcdutils
from networking_vpp.ext_manager import ExtensionManager
from networking_vpp.extension import VPPAgentExtensionBase
from networking_vpp.mech_vpp import SecurityGroup
from networking_vpp.mech_vpp import SecurityGroupRule
from networking_vpp.utils import device_monitor_async
from networking_vpp.utils import file_monitor
from networking_vpp import version
import neutron_lib.constants as n_const
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
import neutron.conf.agent.securitygroups_rpc
import neutron.conf.plugins.ml2.config
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from oslo_serialization import jsonutils
from oslo_utils import netutils
from typing import cast, Callable, Dict, TypeVar, Union, Optional, Set # noqa
TYPE_GPE = nvpp_const.TYPE_GPE

LOG = logging.getLogger(__name__)

# A model of a bi-directional VPP ACL corresponding to a secgroup
# (in_idx/out_idx are the VPP ACL indices for the two traffic directions)
VppAcl = namedtuple('VppAcl', ['in_idx', 'out_idx'])

# TODO(najoy) Expose the below as a config option
# Enable stateful reflexive ACLs in VPP which adds automatic reverse rules
# When False, reverse rules are added by the vpp-agent and
# VPP does not maintain any session states
reflexive_acls = True

# We use eventlet for everything but threads. Here, we need an eventlet-based
# locking mechanism, so we call out eventlet specifically rather than using
# threading.Semaphore.
#
# Our own, strictly eventlet, locking:
# (the defaultdict lazily creates one semaphore per lock name on first use)
_semaphores: Dict[str, eventlet.semaphore.Semaphore] = \
    defaultdict(eventlet.semaphore.Semaphore)
def get_root_helper(conf) -> str:
    """Return the root helper command configured for privilege separation."""
    agent_conf = conf.AGENT
    return agent_conf.root_helper
def setup_privsep() -> None:
    """Initialize oslo.privsep so privileged commands run via the helper."""
    helper_cmd = get_root_helper(cfg.CONF)
    priv_context.init(root_helper=shlex.split(helper_cmd))
CV = TypeVar('CV', bound=Callable)


def eventlet_lock(name: str) -> Callable[[CV], CV]:
    """Decorator factory serialising calls under a named eventlet lock.

    All functions decorated with the same *name* share one semaphore from
    the module-level ``_semaphores`` map, so at most one green thread may
    execute any of them at a time.

    :param name: the lock name; decorations sharing it share a semaphore
    :returns: a decorator wrapping its target in acquire/release
    """
    # Local import: keeps this fix self-contained without touching the
    # file-wide import block.
    import functools

    sema = _semaphores[name]

    def eventlet_lock_decorator(func: CV) -> CV:
        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__, etc.) for logging and introspection.
        @functools.wraps(func)
        def func_wrap(*args, **kwargs):
            LOG.debug("Acquiring lock '%s' before executing %s" %
                      (name, func.__name__))
            with sema:
                LOG.debug("Acquired lock '%s' before executing %s" %
                          (name, func.__name__))
                return func(*args, **kwargs)
        return cast(CV, func_wrap)
    return eventlet_lock_decorator
######################################################################

# This mirrors functionality in Neutron so that we're creating a name
# that Neutron can find for its agents.
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX

######################################################################

# TODO(ijw): should be pulled from Neutron or Nova - this naming
# scheme is common between both
# Number of leading characters of the port UUID used in device names.
TAP_UUID_LEN = 11
def get_tap_name(uuid):
    # type: (str) -> str
    """Kernel tap device name for a port, matching Nova/Neutron naming."""
    return '%s%s' % (n_const.TAP_DEVICE_PREFIX, uuid[:TAP_UUID_LEN])
def get_bridge_name(uuid):
    # type: (str) -> str
    """Name of the kernel bridge joining the VM tap to the VPP tap."""
    return 'br-%s' % uuid[:TAP_UUID_LEN]
# This is our internal name and the other end neither knows or cares about
# it, only the bridge we put it in
def get_vpptap_name(uuid):
    # type: (str) -> str
    """Name of the VPP-side kernel tap device (internal to this agent)."""
    return 'vpp%s' % uuid[:TAP_UUID_LEN]
def default_if_none(x, default):
    """Return *x*, substituting *default* only when *x* is None."""
    if x is None:
        return default
    return x
######################################################################


def VPP_TAG(tag):
    """Prefix a tag fragment with this agent's VPP tag namespace."""
    return 'net-vpp.%s' % tag
# Interface tagging naming scheme :
# tap and vhost interfaces: port:<uuid>
# Uplink Connectivity: uplink:<net_type>.<seg_id>

# MAX_PHYSNET_LENGTH + the tag format must be <= the 64 bytes of a VPP tag
MAX_PHYSNET_LENGTH = 32
TAG_PHYSNET_IF_PREFIX = VPP_TAG('physnet:')  # marks a physnet trunk interface
TAG_UPLINK_PREFIX = VPP_TAG('uplink:')       # marks a network uplink interface
TAG_L2IFACE_PREFIX = VPP_TAG('port:')        # marks a bound Neutron port
def get_vhostuser_name(uuid):
    """Filesystem path of the vhostuser socket for a port UUID."""
    socket_dir = cfg.CONF.ml2_vpp.vhost_user_dir
    return os.path.join(socket_dir, uuid)
def physnet_if_tag(physnet_name):
    """Tag marking a VPP interface as the trunk for a physnet."""
    return '%s%s' % (TAG_PHYSNET_IF_PREFIX, physnet_name)
def decode_physnet_if_tag(tag):
    """Extract the physnet name from a physnet interface tag, or None."""
    if tag is None:
        return None
    found = re.match('^' + TAG_PHYSNET_IF_PREFIX + '([^.]+)$', tag)
    if found is None:
        return None
    return found.group(1)
def uplink_tag(physnet, net_type, seg_id):
    """Tag recording which network an uplink interface serves."""
    return '%s%s.%s.%s' % (TAG_UPLINK_PREFIX, physnet, net_type, seg_id)
def decode_uplink_tag(tag):
    """Spot an uplink interface tag.

    Return (physnet, net_type, seg_id) or None if not an uplink tag.
    """
    if tag is None:
        return None  # not tagged
    found = re.match(
        '^' + TAG_UPLINK_PREFIX + r'([^.]+)\.([^.]+)\.([^.]+)$', tag)
    if found is None:
        return None
    return found.group(1), found.group(2), found.group(3)
def port_tag(port_uuid):
    """Tag marking a VPP interface as bound to a Neutron port."""
    return '%s%s' % (TAG_L2IFACE_PREFIX, port_uuid)
def decode_port_tag(tag):
    """Spot a port interface tag.

    Return the port UUID or None if not a port interface tag.
    """
    if tag is None:
        return None  # not tagged
    found = re.match(
        '^' + TAG_L2IFACE_PREFIX + '(' + n_const.UUID_PATTERN + ')$', tag)
    if found is None:
        return None
    return found.group(1)
######################################################################

# Security group tag formats used to tag ACLs in VPP for
# re-identification on restart

# When leaving VPP and entering the VM
VPP_TO_VM = 1
# When leaving the VM and entering VPP
VM_TO_VPP = 0
# Direction suffixes appended to ACL tags to distinguish the two ACLs
VPP_TO_VM_MARK = 'from-vpp'
VM_TO_VPP_MARK = 'to-vpp'
def VPP_TO_VM_TAG(tag):
    """Suffix *tag* for the VM-ingress (VPP -> VM) direction."""
    return '%s.%s' % (tag, VPP_TO_VM_MARK)
def VM_TO_VPP_TAG(tag):
    """Suffix *tag* for the VM-egress (VM -> VPP) direction."""
    return '%s.%s' % (tag, VM_TO_VPP_MARK)
def DIRECTION_TAG(tag, is_vm_ingress):
    """Append the direction marker matching the traffic direction."""
    return VPP_TO_VM_TAG(tag) if is_vm_ingress else VM_TO_VPP_TAG(tag)
# Tags identifying the two directions of the shared anti-spoof ACLs
COMMON_SPOOF_TAG = VPP_TAG('common_spoof')
COMMON_SPOOF_VPP_TO_VM_TAG = VPP_TO_VM_TAG(COMMON_SPOOF_TAG)
COMMON_SPOOF_VM_TO_VPP_TAG = VM_TO_VPP_TAG(COMMON_SPOOF_TAG)
def common_spoof_tag(is_vm_ingress):
    """Return the shared spoof-filter ACL tag for the given direction."""
    return (COMMON_SPOOF_VPP_TO_VM_TAG if is_vm_ingress
            else COMMON_SPOOF_VM_TO_VPP_TAG)
def decode_common_spoof_tag(tag):
    """Work out if this tag is one of our common spoof filter tags.

    Returns 1 for VM ingress, 0 for VM egress, or None when the tag is
    not a spoof filter tag.
    """
    if tag == COMMON_SPOOF_VPP_TO_VM_TAG:
        return 1
    if tag == COMMON_SPOOF_VM_TO_VPP_TAG:
        return 0
    return None
# Tag prefix for ACLs that represent Neutron security groups
SECGROUP_TAG = VPP_TAG('secgroup:')


def secgroup_tag(secgroup_id, is_vm_ingress):
    """Directional ACL tag for a security group UUID."""
    return DIRECTION_TAG(SECGROUP_TAG + secgroup_id, is_vm_ingress)
def decode_secgroup_tag(tag):
    """Decode a security-group ACL tag.

    Returns (secgroup_id, is_vm_ingress), or (None, None) when the tag
    does not match the security-group tag format.
    """
    # Matches the formats constructed earlier
    found = re.match(
        '^' + SECGROUP_TAG + '(' + n_const.UUID_PATTERN + r')\.(.*)$', tag)
    if not found:
        return None, None
    return found.group(1), found.group(2) == VPP_TO_VM_MARK
class UnsupportedInterfaceException(Exception):
    """Used when ML2 has tried to ask for a weird binding type.

    Raised by interface creation when the requested binding type is
    neither 'tap' nor 'vhostuser'.
    """
    pass
class VPPForwarder(object):
"""Convert agent requirements into VPP calls
This class has no interaction with etcd; other classes have no
interaction with VPP. The job of this class is to turn the
demands of etcd's data into VPP constructs.
"""
    def __init__(self,
                 physnets,  # physnet_name: interface-name
                 mac_age,
                 vpp_cmd_queue_len=None):
        """Build the forwarder state and start its background workers.

        Spawns the vhost-notify and tap-notify green threads, the device
        monitor and the vhost socket file monitor as side effects.

        :param physnets: mapping of physnet name to VPP uplink interface
            name
        :param mac_age: MAC ageing time applied to bridge domains this
            forwarder creates
        :param vpp_cmd_queue_len: optional depth of the VPP API command
            queue; None leaves the VPP driver default in place
        """
        self.vpp = vpp.VPPInterface(LOG, vpp_cmd_queue_len)
        self.net_driver = network_interface.NetworkDriverManager(self)
        self.physnets = physnets
        self.mac_age = mac_age
        # a Mapping of security groups to VPP ACLs
        self.secgroups = {}  # secgroup_uuid: VppAcl(ingress_idx, egress_idx)
        # Security group UUID to the set of associated port UUIDs
        self.remote_group_ports = defaultdict(set)
        # Port UUID to its set of IP addresses
        self.port_ips = defaultdict(set)
        # Remote-group UUID to the set to security-groups that uses it
        self.remote_group_secgroups = defaultdict(set)
        # ACLs we ought to delete
        self.deferred_delete_secgroups = set()
        # Enable the GPE forwarder programming, if required
        self.gpe: Optional[gpe.GPEForwarder]
        if TYPE_GPE in cfg.CONF.ml2.type_drivers:
            self.gpe = gpe.GPEForwarder(self)
        else:
            self.gpe = None
        self.interfaces = {}  # uuid: if idx
        self.router_interfaces = {}  # router_port_uuid: {}
        self.router_external_interfaces = {}  # router external interfaces
        self.floating_ips = {}  # floating_ip_uuid: {}
        if cfg.CONF.ml2_vpp.enable_l3_ha:
            # Router BVI (loopback) interface states for L3-HA
            self.router_interface_states = {}  # {idx: state} 1 = UP, 0 = DOWN
            # VPP Router state variable is updated by the RouterWatcher
            # The default router state is the BACKUP.
            # If this node should be the master it will be told soon enough,
            # and this will prevent us from having two masters on any restart.
            self.router_state = False  # True = Master; False = Backup
        # mac_ip acls do not support atomic replacement.
        # Here we create a mapping of sw_if_index to VPP ACL indices
        # so we can easily lookup the ACLs associated with the interface idx
        # sw_if_index: {"l34": [l34_acl_indxs], "l23": l23_acl_index }
        self.port_vpp_acls: Dict[vpp.if_idx_t, dict] = defaultdict(dict)
        # key: OpenStack port UUID; present when vhost-user is
        # connected and removed when we delete things. May accumulate
        # any other VPP interfaces too, but that's harmless.
        self.port_connected: Set[str] = set()
        self.vhost_ready_callback = None
        eventlet.spawn_n(self.vhost_notify_thread)
        # Thread to drain the queues for binding tap interfaces into Linux
        # bridges
        eventlet.spawn_n(self.tap_notify_thread)
        # External devices detected by the device monitor
        self.external_devices = eventlet.queue.Queue()
        # Device monitor to ensure the tap interfaces are plugged into the
        # right Linux bridge
        self.async_devmon = device_monitor_async.AsyncDeviceMonitor()
        self.async_devmon.on_add(self._consider_external_device)
        # The worker will be in endless loop, so don't care the return value
        self.async_devmon.start()
        # Start Vhostsocket filemonitor to bind sockets as soon as they appear.
        self.filemonitor = file_monitor.FileMonitor(
            watch_pattern=n_const.UUID_PATTERN,
            watch_dir=cfg.CONF.ml2_vpp.vhost_user_dir)
        # Register to handle ON_CREATE event.
        self.filemonitor.register_on_add_cb(
            self.ensure_interface_for_vhost_socket_binding)
        # Register to handle ON_DELETE event.
        # We are expecting the port unbinding call flow to clean up vhost
        # sockets, hence ignoring delete events on vhost file handle.
        self.filemonitor.register_on_del_cb(lambda *args: None)
        # Finally start the file monitor.
        eventlet.spawn_n(self.filemonitor.run)
    ########################################
    # Port resyncing on restart

    def fix_physnets(self, physnets):
        """Fix or remove networks where uplinks have changed in config

        - fixes uplink interfaces from VPP where they've changed in
          config or where the config didn't fully get pushed to
          VPPForwarder
        - deletes interfaces and networks from VPP where the physical
          network is no longer configured
        - evicts ports from bridges with no network

        :param physnets: the currently configured mapping of physnet
            name to interface name, compared against what the VPP tags
            record
        """
        # One uplink per network
        uplink_ports_found = []
        # One physnet can serve multiple uplinks
        physnet_ports_found = {}
        for f in self.vpp.get_interfaces():
            # Find uplink ports on OpenStack networks
            uplink_data = decode_uplink_tag(f['tag'])
            if uplink_data is not None:
                uplink_physnet, net_type, seg_id = uplink_data
                # Record both the uplink if-index and its supporting
                # (parent) if-index; a plain interface is its own parent.
                uplink_ports_found.append([
                    uplink_physnet, net_type, seg_id,
                    f['sw_if_idx'],
                    f['sw_if_idx'] if f['sup_sw_if_idx'] is None
                    else f['sup_sw_if_idx']])
            # Find physical network ports
            physnet_name = decode_physnet_if_tag(f['tag'])
            if physnet_name is not None:
                physnet_ports_found[physnet_name] = f['sw_if_idx']
        # Find physnets we intend according to the config
        configured_physnet_interfaces = {}
        for name, if_name in physnets.items():
            # Can be 'None', that's fine as it won't match anything later
            configured_physnet_interfaces[name] = \
                self.vpp.get_ifidx_by_name(if_name)
        LOG.debug('Configured physnets %s',
                  ', '.join(sorted(configured_physnet_interfaces.keys())))
        for uplink_physnet, net_type, seg_id, sw_if_idx, sup_sw_if_idx \
                in uplink_ports_found:
            # Delete networks with a physnet whose config changed
            if (uplink_physnet not in configured_physnet_interfaces
                    or (sup_sw_if_idx !=
                        configured_physnet_interfaces[uplink_physnet])):
                LOG.warning('Deleting outdated network in VPP: net type '
                            '%(type)s physnet %(physnet)s seg id %(seg)s, '
                            'physnet if %(physif)d uplink %(uplinkif)d',
                            {'type': net_type,
                             'physnet': uplink_physnet,
                             'seg': str(seg_id),
                             'physif': sup_sw_if_idx,
                             'uplinkif': sw_if_idx})
                if uplink_physnet not in configured_physnet_interfaces:
                    LOG.warning('This physnet is no longer in the config')
                else:
                    LOG.warning(
                        'This physnet now uses interface '
                        '%(idx)d (%(physnet_name)s)',
                        {'idx': configured_physnet_interfaces[uplink_physnet],
                         'physnet_name': physnets[uplink_physnet]})
                # This will remove ports from bridges, which means
                # that they may be rebound back into networks later
                # or may be deleted if no longer used.
                self.delete_network_bridge_on_host(net_type,
                                                   sw_if_idx,
                                                   sw_if_idx)
        for name, if_idx in physnet_ports_found.items():
            if configured_physnet_interfaces.get(name, None) != if_idx:
                # This configuration has changed.
                # Untag the original physnet interface, which is no
                # longer used as a physnet
                LOG.warning('Removing old physnet from VPP: '
                            'physnet %(physnet_name)s interface %(idx)s',
                            {'physnet_name': name,
                             'idx': str(if_idx)})
                # In case there was a flat network, make sure the flat
                # network bridge no longer exists
                self.delete_network_bridge_on_host('flat', if_idx, if_idx)
                self.vpp.set_interface_tag(if_idx, None)
        # The remaining networks (with uplinks and bridge domains) are
        # functional, and idempotent binding will do nothing to
        # interfaces in the right bridges. It will fix those in the
        # wrong bridges.
        # Dead bridges have been deleted and binding
        # will find a new home for the interfaces that still exist.
def find_bound_ports(self):
"""Assuming no local data, find bound ports in VPP
This analyses the tags to identify ports in VPP that
have been bound by this process before it restarted.
"""
bound_ports = set()
for f in self.vpp.get_interfaces():
# Find downlink ports
port_id = decode_port_tag(f['tag'])
if port_id is not None:
bound_ports.add(port_id)
return bound_ports
    ########################################

    def vhost_notify_thread(self):
        """Find vhostuser connections with an attached VM

        The moment of VM attachment is useful, as it's one of the
        preconditions for notifying Nova a socket is ready. Watching
        the vhostuser data inside VPP has a performance impact on
        forwarding, so instead we watch the kernel's idea of which
        vhostuser connections are properly opened.

        Having two open sockets is 99% ready - technically, the interface
        is ready when VPP has mapped its memory, but these two events are
        nearly contemporaneous, so this is good enough.

        Runs forever (spawned from __init__); polls once per second.
        """
        dirname = cfg.CONF.ml2_vpp.vhost_user_dir
        # We need dirname to have precisely one trailing slash.
        dirname = dirname.rstrip('/') + '/'
        while True:
            # Count of open handles per socket file in the vhost dir
            opens: Dict[str, int] = defaultdict(int)
            with open('/proc/net/unix') as content:
                # Track unix sockets in vhost directory that are opened more
                # than once
                for f in content:
                    # Problems with fnames with spaces in, though
                    _, fname = f.rsplit(' ', 1)
                    if fname.startswith(dirname):
                        fname = fname[len(dirname):].rstrip("\n")
                        opens[fname] = opens[fname] + 1
            # Report on any sockets that are open exactly twice (VPP + KVM)
            # (note list clone so that we can delete entries)
            for f in list(opens.keys()):
                if opens[f] != 2:
                    del opens[f]
            open_names: Set[str] = set(opens.keys())
            # Ports newly connected since the last poll
            open_notifications: Set[str] = open_names - self.port_connected
            # .. we don't have to notify the port drops, that's fine
            # Update this *before* making callbacks so that this register is up
            # to date
            self.port_connected = open_names
            if self.vhost_ready_callback:
                for uuid in open_notifications:
                    self.vhost_ready_callback(uuid)
            eventlet.sleep(1)
def vhostuser_linked_up(self, uuid):
return uuid in self.port_connected
def vhostuser_unlink(self, uuid):
self.port_connected.discard(uuid)
    ########################################

    def ifup(self, ifidx):
        """Proxy for VPP's ifup.

        Brings the VPP interface with sw_if_index *ifidx*
        administratively up.
        """
        self.vpp.ifup(ifidx)
########################################
def get_if_for_physnet(self, physnet):
"""Find (and mark used) the interface for a physnet"""
ifname = self.physnets.get(physnet, None)
if ifname is None:
LOG.error('Physnet %s requested but not in config',
physnet)
return None, None
ifidx = self.vpp.get_ifidx_by_name(ifname)
if ifidx is None:
LOG.error('Physnet %s interface %s does not '
'exist in VPP', physnet, ifname)
return None, None
self.vpp.set_interface_tag(ifidx, physnet_if_tag(physnet))
return ifname, ifidx
    def delete_network_bridge_on_host(
            self, net_type: str,
            bridge_domain_id: vpp.br_idx_t,
            uplink_if_idx: vpp.if_idx_t) -> None:
        """Delete a bridge corresponding to a network from VPP

        Usable on restart - uses nothing but the data in VPP.

        :param net_type: the network type ('flat', 'vlan', ...)
        :param bridge_domain_id: VPP bridge domain index to remove
        :param uplink_if_idx: the network's uplink interface index; it
            is left UP, and for VLAN networks its sub-interface is
            deleted afterwards
        """
        if bridge_domain_id in self.vpp.get_bridge_domains():
            # If there are ports still in this network, disable them
            # They may be deleted later (if at startup) or they may
            # be rebound to another bridge domain
            if_idxes = self.vpp.get_ifaces_in_bridge_domain(bridge_domain_id)
            # When this bridge domain is for an OpenStack flat network, the
            # uplink interface may be a physical interface, i.e. not VLAN-based
            # sub-interfaces. In this case, we will not bring down the uplink
            # interface, and always leave it UP.
            if_idxes_without_uplink = \
                [i for i in if_idxes if i != uplink_if_idx]
            # At startup, this is downing the interfaces in a bridge that
            # is no longer required. However, in free running, this
            # should never find interfaces at all - they should all have
            # been unbound before the deletion. (If it does find them,
            # the removal of interfaces is probably the best thing we can
            # do, but they may not stay down if it races with the binding
            # code.)
            self.vpp.ifdown(*if_idxes_without_uplink)
            self.vpp.delete_from_bridge(*if_idxes)
            self.vpp.delete_bridge_domain(bridge_domain_id)
        # The physnet is gone so no point in keeping the vlan sub-interface
        # TODO(onong): VxLAN
        if net_type == 'vlan':
            if uplink_if_idx is not None:
                self.vpp.delete_vlan_subif(uplink_if_idx)
########################################
# stolen from LB driver
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name)
br.set_log_fail_as_error(False)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
def ensure_kernel_bridge(self, bridge_name):
"""Create a bridge unless it already exists."""
# _bridge_exists_and_ensure_up instead of device_exists is used here
# because there are cases where the bridge exists but it's not UP,
# for example:
# 1) A greenthread was executing this function and had not yet executed
# "ip link set bridge_name up" before eventlet switched to this
# thread running the same function
# 2) The Nova VIF driver was running concurrently and had just created
# the bridge, but had not yet put it UP
if not self._bridge_exists_and_ensure_up(bridge_name):
bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name)
bridge_device.setfd(0)
bridge_device.disable_stp()
bridge_device.disable_ipv6()
bridge_device.link.set_up()
else:
bridge_device = bridge_lib.BridgeDevice(bridge_name)
return bridge_device
# TODO(ijw): should be checking this all succeeded
# end theft
########################################
def _consider_external_device(self, dev_name):
"""See if we need to take action when a net device is created
This function will be called as a callback when a new interface is
created in Linux kernel. We will filter for tap interfaces created by
OpenStack, and those will be added to the bridges that we create on the
Neutron side of things.
"""
match = re.search(r'tap[0-9a-f]{8}-[0-9a-f]{2}', dev_name)
if not match:
return
# TODO(ijw) will act upon other mechanism drivers' taps
# Add the detected external device to be handled by the port-watcher
self.external_devices.put(dev_name)
def tap_notify_thread(self):
"""Ensure detected external tap devices are added to the bridge.
All detected external devices are queued in the external_devices
data set. So handle it in this method to ensure that these are added
to the bridge.
"""
while True:
try:
dev_name = self.external_devices.get()
port_id = dev_name[3:]
bridge_name = "br-%s" % port_id
self.ensure_tap_in_bridge(dev_name, bridge_name)
except Exception:
LOG.exception("Error while binding tap interface %s", dev_name)
    def ensure_tap_in_bridge(self, tap_name, bridge_name):
        """Add a TAP device to a Linux kernel bridge

        Defend against this having been done already (common on restart)
        and this missing a requirement (common when plugging external
        tap interfaces).

        :param tap_name: the kernel tap device to enslave
        :param bridge_name: the kernel bridge to add it to
        """
        bridge = bridge_lib.BridgeDevice(bridge_name)
        bridge.set_log_fail_as_error(False)
        # Only attempt the add when bridge and tap both exist and the
        # tap is not already enslaved; all three can change under us.
        if bridge.exists() and ip_lib.device_exists(tap_name) \
                and not bridge.owns_interface(tap_name):
            try:
                bridge.addif(tap_name)
            except Exception as ex:
                # External TAP interfaces created by DHCP or L3 agent will be
                # added to corresponding Linux Bridge by vpp-agent to talk to
                # VPP. During a regular port binding process, there are two
                # code paths calling this function for adding the interface to
                # the Linux Bridge, which may potentially cause a race
                # condition and a non-harmful traceback in the log. Also, it
                # is quite possible that a bridge may have been deleted by the
                # normal port unbinding process before this code tries to add
                # the tap interafce.
                # The fix will eliminate the non-harmful traceback in the log.
                match1 = re.search(r"Stderr\: device (vpp|tap)[0-9a-f]{8}-"
                                   "[0-9a-f]{2} is already a member of a "
                                   "bridge; can't enslave it to bridge br-"
                                   r'[0-9a-f]{8}-[0-9a-f]{2}\.', str(ex))
                match2 = re.search(r"Stderr: Error: argument \"br-"
                                   "[0-9a-f]{8}-[0-9a-f]{2}\" is wrong: "
                                   "Device does not exist", str(ex))
                # Only log failures that are not one of the two benign
                # races described above.
                if not match1 and not match2:
                    LOG.exception("Can't add interface %s to bridge %s: %s" %
                                  (tap_name, bridge_name, str(ex)))
def _ensure_kernelside_tap(self, bridge_name, tap_name, int_tap_name):
# This is the kernel-side config (and we should not assume
# that, just because the interface exists in VPP, it has
# been done previously - the crash could occur in the
# middle of the process)
# Running it twice is harmless. Never running it is
# problematic.
# TODO(ijw): someone somewhere ought to be sorting
# the MTUs out
self.ensure_kernel_bridge(bridge_name)
# This is the device that we just created with VPP
self.ensure_tap_in_bridge(int_tap_name, bridge_name)
# This is the external TAP device that will be
# created by Nova or an agent, say the DHCP agent,
# later in time.
self.ensure_tap_in_bridge(tap_name, bridge_name)
    # This is called by the (eventlet) inotify functions and the (eventlet)
    # etcd functionality, and thus needs an eventlet-based lock. We've found
    # oslo_concurrency thinks that, because threading is unpatched, a threading
    # lock is required, but this ends badly.
    @eventlet_lock('ensure-interface-lock')
    def ensure_interface_on_host(self, if_type, uuid, mac=None):
        """Create or update vpp interface on host based on if_type.

        Depending on the if_type (maketap, plugtap or vhostuser) call vpp papi
        to do vpp side of the plumbing. This will change depending on the
        if_type. The interfaces are tagged saved in the internal dict for easy
        retrieval.

        The call is idempotent if the uuid and its associated
        interface is already present.

        :param if_type: 'tap' or 'vhostuser'; anything else raises
            UnsupportedInterfaceException
        :param uuid: the Neutron port UUID
        :param mac: the port MAC; may be None, in which case VPP picks
        :return: dict indexed on uuid
        """
        if uuid in self.interfaces:
            # It's definitely there, we made it ourselves
            pass
        else:
            # TODO(ijw): it may exist, but we may need to create it
            # - and what exists may be wrong so we may have to
            # recreate it
            # TODO(ijw): idempotency
            LOG.debug('Creating port %s as type %s with mac %s',
                      uuid, if_type, mac)

            # Deal with the naming conventions of interfaces
            # TODO(ijw): naming not obviously consistent with
            # Neutron's naming
            tap_name = get_tap_name(uuid)

            # TODO(ijw) structured type
            props: dict
            if if_type == 'tap':
                bridge_name = get_bridge_name(uuid)
                int_tap_name = get_vpptap_name(uuid)
                props = {'bridge_name': bridge_name,
                         'ext_tap_name': tap_name,
                         'int_tap_name': int_tap_name}
            elif if_type == 'vhostuser':
                path = get_vhostuser_name(uuid)
                props = {'path': path}
            else:
                raise UnsupportedInterfaceException()
            tag = port_tag(uuid)
            props['bind_type'] = if_type
            # NB(onong): In case the if_type is vhostuser then this is the
            # neutron port's/VM's mac and it has implications for gpe networks
            # so please be mindful before altering this
            props['mac'] = mac
            # The tag lets us refind interfaces across agent restarts.
            iface_idx = self.vpp.get_ifidx_by_tag(tag)
            if iface_idx is not None:
                # The agent has at some point reset, but before the reset
                # this interface was at least created. A previous sweep
                # will have ensured it's the right sort of interface.
                LOG.debug('port %s recovering existing port in VPP',
                          uuid)
            else:
                # Make an interface, and tag it for refinding.
                LOG.debug('binding port %s as type %s' %
                          (uuid, if_type))
                if if_type == 'tap':
                    iface_idx = self.vpp.create_tap(int_tap_name, mac=None,
                                                    tag=tag)
                elif if_type == 'vhostuser':
                    iface_idx = self.vpp.create_vhostuser(path, tag)
            if if_type == 'tap':
                # Plugtap interfaces belong in a kernel bridge, and we need
                # to monitor for the other side attaching.
                self._ensure_kernelside_tap(bridge_name,
                                            tap_name,
                                            int_tap_name)
            props['iface_idx'] = iface_idx
            self.interfaces[uuid] = props
        return self.interfaces[uuid]
def ensure_interface_for_vhost_socket_binding(self, name):
"""Ensure vpp interface for imminent vhost socket binding.
Somebody has dropped a file in the vhost_socket_directory which matched
our watch pattern (Neutron port uuid). We are expecting an imminent
vhost socket binding (from presumably Nova), so lets get ahead of the
curve and create a vhost socket for it.
Inteface name is the vhost socket file name and since we don't know
the mac, let vhost interface create function make one.
"""
LOG.debug("Calling VPP interface creation on vhost socket with props "
"vif_type: %s , uuid: %s ", 'vhostuser', name)
self.ensure_interface_on_host('vhostuser', uuid=name, mac=None)
def ensure_interface_in_vpp_bridge(self, net_br_idx, iface_idx):
"""Idempotently ensure that a bridge contains an interface
The interface must exist, but we ensure the bridge exists and
that the interface is in it
"""
self.ensure_bridge_domain_in_vpp(net_br_idx)
# Adding an interface to a bridge does nothing if it's
# already in there, and moves it if it's in another
self.vpp.add_to_bridge(net_br_idx, iface_idx)
def ensure_bridge_domain_in_vpp(self, bridge_idx):
if bridge_idx not in self.vpp.get_bridge_domains():
LOG.debug('Creating vpp bridge domain %s', bridge_idx)
self.vpp.create_bridge_domain(bridge_idx, self.mac_age)
    def bind_interface_on_host(self, if_type, uuid, mac, physnet,
                               net_type, seg_id):
        """Configure the interface in VPP per the binding request.

        Because we may be restarting the agent on a VPP that is already
        running, do this defensively: interfaces that we do not know
        about may have had some of their binding done. Acting in this
        way, we can be sure that the interface is now correctly bound
        regardless of what may have transpired previously.

        This may be called at any time because of a request from
        the mechanism driver, or it may be called during resync
        when state already exists in VPP but in either case we fix
        what we find and draw out from that a picture of the current
        state, including whether (in the case of vhostuser interfaces)
        the far end of the socket has attached to VPP.

        :param if_type: 'tap' or 'vhostuser'
        :param uuid: Neutron port UUID
        :param mac: port MAC address
        :param physnet: physical network name
        :param net_type: network type (e.g. 'vlan', 'flat', GPE)
        :param seg_id: segmentation ID within the network type
        :returns: the interface props dict, or None if binding failed
        """
        # In order, we create the network bridge, the interface for
        # the far end, and we add it to the bridge. Any of these
        # may have been done before; the functions we call correct
        # any previous state they find.
        net_data = self.net_driver.ensure_network(physnet, net_type, seg_id)
        if net_data is None:
            LOG.error('port bind is not possible as physnet '
                      'could not be configured')
            # Returning None allows us to deal with the uplink
            # side of a failed binding in the caller.
            # For resyncs, the port exists but it's not in a bridge domain
            # and is down, which is the best we can offer.
            return None
        if net_type == TYPE_GPE and self.gpe is None:
            LOG.error('port bind - GPE is not enabled')
            return None
        net_br_idx = net_data['bridge_domain_id']
        props = self.ensure_interface_on_host(if_type, uuid, mac)
        iface_idx = props['iface_idx']
        self.ensure_interface_in_vpp_bridge(net_br_idx, iface_idx)

        # Ensure local mac to VNI mapping for GPE
        if net_type == TYPE_GPE and self.gpe is not None:
            LOG.debug('Adding local GPE mapping for seg_id:%s and mac:%s',
                      seg_id, mac)
            self.gpe.add_local_gpe_mapping(seg_id, mac)

        props['net_data'] = net_data
        LOG.debug('Bound vpp interface with sw_idx:%s on '
                  'bridge domain:%s',
                  iface_idx, net_br_idx)
        return props
    def unbind_interface_on_host(self, uuid):
        """Detach an interface, clean up structures

        This removes and destroys the interface and the network
        if it is no longer used.

        This is *not* used in rebinding, as this requires the data
        we stored about an interface when it was bound.

        :param uuid: the Neutron port UUID; unknown ports are ignored
        """
        if uuid not in self.interfaces:
            LOG.debug('unknown port %s unbinding request - ignored',
                      uuid)
        else:
            props = self.interfaces[uuid]
            net = props.get('net_data')
            self.clean_interface_from_vpp(uuid, props)

            # Delete the port ip address from remote_group_id list
            self.port_ips.pop(uuid, None)

            if net is not None:
                # Check if this is the last interface on host, safe if this
                # interface is incompletely bound
                for interface in self.interfaces.values():
                    if net == interface.get('net_data'):
                        # safe if the other interface is not bound
                        break
                else:
                    # for-else: runs only when no remaining interface
                    # references this network.
                    # Network is not used on this host, delete it
                    self.net_driver.delete_network(net['physnet'],
                                                   net['network_type'],
                                                   net['segmentation_id'])
    def bind_subport_on_host(self, parent_port, subport_data):
        """Bind the subport of a bound parent vhostuser port.

        We ensure parent port binding before calling this method.

        :param parent_port: UUID of the already-bound parent trunk port
        :param subport_data: dict with 'port_id', 'segmentation_id',
            'physnet', 'uplink_seg_type' and 'uplink_seg_id' keys
        :returns: the subport's props dict, or None if the uplink
            network could not be configured
        """
        subport_uuid = subport_data['port_id']
        subport_seg_id = subport_data['segmentation_id']
        # parent vhostuser intf
        parent_props = self.interfaces[parent_port]
        parent_if_idx = parent_props['iface_idx']
        # Ensure that the uplink and the BD's are setup
        physnet = subport_data['physnet']
        uplink_seg_type = subport_data['uplink_seg_type']
        uplink_seg_id = subport_data['uplink_seg_id']
        LOG.debug('trunk: ensuring subport network on host '
                  'physnet %s, uplink_seg_type %s, uplink_seg_id %s',
                  physnet, uplink_seg_type, uplink_seg_id)
        # Ensure an uplink for the subport
        # Use the uplink physnet, uplink_seg_id & seg_type
        net_data = self.net_driver.ensure_network(physnet,
                                                  uplink_seg_type,
                                                  uplink_seg_id)
        if net_data is None:
            LOG.error('trunk sub-port binding is not possible as the '
                      'physnet could not be configured for subport')
            return None
        # fetch if the subport interface already in vpp
        subport_tag = port_tag(subport_uuid)
        subport_if_idx = self.vpp.get_ifidx_by_tag(subport_tag)
        net_br_idx = net_data['bridge_domain_id']
        if subport_if_idx is not None:
            # It's already there and we created it
            LOG.debug('Recovering existing trunk subport %s in VPP',
                      subport_uuid)
            # Ensure that the recovered subport is in vpp bridge
            self.ensure_interface_in_vpp_bridge(net_br_idx, subport_if_idx)
        else:
            # create subport vhostuser intf and ensure it's in vpp bridge
            LOG.debug('trunk: ensuring subport interface on host '
                      'parent_if_idx %s, seg_id %s', parent_if_idx,
                      subport_seg_id)
            # The subport is a VLAN sub-interface of the parent port,
            # using the subport's own segmentation ID as the VLAN tag.
            subport_if_idx = self.vpp.create_vlan_subif(parent_if_idx,
                                                        subport_seg_id)
            self.ensure_interface_in_vpp_bridge(net_br_idx, subport_if_idx)
            # set subport tag, so we can find it during resyncs
            self.vpp.set_interface_tag(subport_if_idx, subport_tag)
        LOG.debug("Bound subport in vpp with sw_idx: %s on BD: %s ",
                  subport_if_idx, net_br_idx)
        # Add subport props to interfaces along with parent port uuid
        self.interfaces[subport_uuid] = {'iface_idx': subport_if_idx,
                                         'net_data': net_data,
                                         'mac': parent_props['mac'],
                                         'bind_type': 'vhostuser',
                                         'path': parent_props['path'],
                                         'parent_uuid': parent_port
                                         }
        # Track the subport in the parent's 'trunk' set so the parent
        # cannot be cleaned up while subports remain.
        if 'trunk' not in parent_props:
            LOG.debug('Setting trunk attr value in parent port props for '
                      'subport %s', subport_uuid)
            parent_props['trunk'] = set([subport_uuid])
        else:
            LOG.debug('Adding subport to trunk parent props for subport %s ',
                      subport_uuid)
            parent_props['trunk'].add(subport_uuid)
        return self.interfaces[subport_uuid]
def unbind_subport_on_host(self, subport):
"""Unbind the vhostuser subport in VPP."""
if subport not in self.interfaces:
LOG.debug('unknown subport %s unbinding request - ignored',
subport)
else:
LOG.debug("Unbinding subport %s on host", subport)
parent_port = self.interfaces[subport]['parent_uuid']
LOG.debug("Parent port id of subport %s is %s",
subport, parent_port)
self.unbind_interface_on_host(subport)
self.interfaces[parent_port]['trunk'].remove(subport)
def clean_interface_from_vpp(self, uuid, props):
    """Remove the VPP interface for port *uuid* described by *props*.

    Handles vhostuser ports (trunk parents, VLAN subports and plain
    ports) and tap ports.  Trunk parents are deliberately left alone
    until every subport has been unbound.  Also tears down associated
    GPE mappings, per-port ACL state and (for taps) the helper Linux
    bridge.  Teardown order matters here - see inline comments.
    """
    # Don't unbind a trunk port with subports
    if 'trunk' in props and len(props['trunk']) > 0:
        LOG.debug('Waiting for subports %s to be unbound before '
                  'unbinding trunk port %s', props, uuid)
        return
    iface_idx = props['iface_idx']
    LOG.debug('unbinding port %s, recorded as type %s',
              uuid, props['bind_type'])
    # We no longer need this interface. Specifically if it's
    # a vhostuser interface it's annoying to have it around
    # because the VM's memory (hugepages) will not be
    # released. So, here, we destroy it.
    # GPE code in VPP does not clean up its data structures
    # properly if the port
    # is deleted from the bridge without first removing the
    # local GPE eid mapping. So remove local mapping,
    # if we are bound using GPE
    if props['net_data']['network_type'] == TYPE_GPE \
            and self.gpe is not None:
        mac = props['mac']
        seg_id = props['net_data']['segmentation_id']
        self.gpe.delete_local_gpe_mapping(seg_id, mac)
    if props['bind_type'] == 'vhostuser':
        # Delete port from vpp_acl map if present
        if iface_idx in self.port_vpp_acls:
            self.remove_acls_on_port(iface_idx)
            self.remove_mac_ip_acl_on_port(iface_idx)
            del self.port_vpp_acls[iface_idx]
        # remove port from bridge (sets to l3 mode) prior to deletion
        self.vpp.delete_from_bridge(iface_idx)
        # If it is a subport of a trunk port then delete the corresponding
        # vlan sub-interface. Otherwise it is a parent port or a normal
        # vhostuser port and we delete the vhostuser interface itself.
        if 'parent_uuid' not in props:
            self.vpp.delete_vhostuser(iface_idx)
        else:
            self.vpp.delete_vlan_subif(iface_idx)
        # This interface is no longer connected if it's deleted
        # RACE, as we may call unbind BEFORE the vhost user
        # interface is notified as connected to qemu
        self.vhostuser_unlink(uuid)
    elif props['bind_type'] == 'tap':
        # remove port from bridge (sets to l3 mode) prior to deletion
        self.vpp.delete_from_bridge(iface_idx)
        self.vpp.delete_tap(iface_idx)
        bridge_name = get_bridge_name(uuid)

        # Local subclass: the cleanup commands below are expected to
        # fail in some orderings, so exit codes are ignored rather
        # than raised.
        class FailableBridgeDevice(bridge_lib.BridgeDevice):
            # For us, we expect failing commands and want them ignored.
            def _ip_link(self, cmd):
                cmd = ['ip', 'link'] + cmd
                ip_wrapper = ip_lib.IPWrapper(self.namespace)
                return ip_wrapper.netns.execute(
                    cmd,
                    check_exit_code=False,
                    log_fail_as_error=False,
                    run_as_root=True
                )
        bridge = FailableBridgeDevice(bridge_name)
        if bridge.exists():
            # These may fail, don't care much
            if bridge.owns_interface(props['int_tap_name']):
                bridge.delif(props['int_tap_name'])
            if bridge.owns_interface(props['ext_tap_name']):
                bridge.delif(props['ext_tap_name'])
            bridge.link.set_down()
            bridge.delbr()
    else:
        LOG.error('Unknown port type %s during unbind',
                  props['bind_type'])
    # Forget the port regardless of bind type
    self.interfaces.pop(uuid)
# TODO(ijw) this *really* needs typing with the return value structure.
def _to_acl_rule(self, r, d):
    """Translate a SecurityGroupRule into a VPP ACL rule dict.

    Arguments:
    r - SecurityGroupRule namedtuple with fields is_ipv6,
        remote_ip_addr, ip_prefix_len, protocol, port_min, port_max.
        (is_ipv6 describes the address family of remote_ip_addr.)
    d - direction: 0 for ingress, 1 for egress.

    Return: a rule dict in the shape the VPP ACL API expects.
    """
    # Reflexive (stateful) matching applies only when globally
    # enabled, and only to TCP (6), UDP (17) or any-protocol (0)
    # rules; everything else (e.g. ICMP) gets a plain permit.
    # is_permit: 1 == permit, 2 == reflexive permit
    if reflexive_acls and r.protocol in [6, 17, 0]:
        action = 2
    else:
        action = 1
    rule = {'is_permit': action, 'proto': r.protocol}
    # The secgroup's remote address is the far end of the flow: the
    # source for ingress rules and the destination for egress rules.
    # OpenStack may hand us an address *on* a subnet; VPP wants the
    # network address, so normalise via ip_interface().network.
    remote_net = ip_interface((r.remote_ip_addr, r.ip_prefix_len,)).network
    wildcard_net = ip_network("::/0" if r.is_ipv6 else "0.0.0.0/0")
    if d == 0:
        rule['src_prefix'] = remote_net
        rule['dst_prefix'] = wildcard_net
    else:
        rule['src_prefix'] = wildcard_net
        rule['dst_prefix'] = remote_net
    if r.protocol in [1, 58]:
        # ICMP/ICMPv6: the "srcport" fields carry the ICMP type and
        # the "dstport" fields carry the ICMP code.
        if r.port_min == -1:
            # Match every ICMP type and code [0-255]
            type_first = type_last = None
            type_first, type_last = 0, 255
            code_first, code_last = 0, 255
        elif r.port_max == -1:
            # One ICMP type, every code
            type_first = type_last = r.port_min
            code_first, code_last = 0, 255
        else:
            # port_min is the ICMP type, port_max the ICMP code
            type_first = type_last = r.port_min
            code_first = code_last = r.port_max
        rule['srcport_or_icmptype_first'] = type_first
        rule['srcport_or_icmptype_last'] = type_last
        rule['dstport_or_icmpcode_first'] = code_first
        rule['dstport_or_icmpcode_last'] = code_last
    elif r.protocol in [6, 17]:
        # TCP/UDP: secgroup port ranges are destination ranges; the
        # source range is left wide open.
        rule['dstport_or_icmpcode_first'] = default_if_none(r.port_min, 0)
        rule['dstport_or_icmpcode_last'] = default_if_none(r.port_max,
                                                           65535)
        rule['srcport_or_icmptype_first'] = 0
        rule['srcport_or_icmptype_last'] = 65535
    elif r.protocol == 0:
        # Any protocol: permit all source and destination ports
        rule['dstport_or_icmpcode_first'] = 0
        rule['dstport_or_icmpcode_last'] = 65535
        rule['srcport_or_icmptype_first'] = 0
        rule['srcport_or_icmptype_last'] = 65535
    return rule
# Reverse rules are only added if reflexive_acls is set to False
def _reverse_rule(self, r):
"""Compose and return a reverse rule for r if reflexive_acls is False
Arguments:
r - rule dictionary returned by the _to_acl_rule(r) method above
swap src and dst IP and port ranges to match return traffic for r
"""
acl_rule = {}
# 1 == Permit rule and 0 == deny rule
acl_rule['is_permit'] = r['is_permit']
acl_rule['proto'] = r['proto']
# All TCP/UDP IPv4 and IPv6 traffic
if r['proto'] in [6, 17, 0]:
acl_rule['src_prefix'] = r['dst_prefix']
acl_rule['dst_prefix'] = r['src_prefix']
# Swap port range values
acl_rule['srcport_or_icmptype_first'] = \
r['dstport_or_icmpcode_first']
acl_rule['srcport_or_icmptype_last'] = \
r['dstport_or_icmpcode_last']
acl_rule['dstport_or_icmpcode_first'] = \
r['srcport_or_icmptype_first']
acl_rule['dstport_or_icmpcode_last'] = \
r['srcport_or_icmptype_last']
return acl_rule
def acl_add_replace_on_host(self, secgroup):
    """Create or update the VPP ACL pair for a security group.

    Arguments:
    secgroup - SecurityGroup namedtuple
               ('SecurityGroup', ['id', 'ingress_rules', 'egress_rules'])
    """
    # Reuse existing ACL indexes when we already know this group;
    # 0xffffffff tells VPP to allocate fresh ACLs.
    in_acl_idx, out_acl_idx = self.secgroups.get(
        secgroup.id, VppAcl(0xffffffff, 0xffffffff))
    in_acl_rules = [self._to_acl_rule(r, 0)
                    for r in secgroup.ingress_rules]
    out_acl_rules = [self._to_acl_rule(r, 1)
                     for r in secgroup.egress_rules]
    if not reflexive_acls:
        # Without reflexive ACLs, return traffic for TCP/UDP/any-IP
        # rules must be permitted explicitly; ICMP is excluded.
        in_returns = [self._reverse_rule(r) for r in in_acl_rules
                      if r['proto'] in [6, 17, 0]]
        out_returns = [self._reverse_rule(r) for r in out_acl_rules
                       if r['proto'] in [6, 17, 0]]
        in_acl_rules = in_acl_rules + out_returns
        out_acl_rules = out_acl_rules + in_returns
    in_acl_idx = self.vpp.acl_add_replace(
        acl_index=in_acl_idx,
        tag=secgroup_tag(secgroup.id, VPP_TO_VM),
        rules=in_acl_rules)
    out_acl_idx = self.vpp.acl_add_replace(
        acl_index=out_acl_idx,
        tag=secgroup_tag(secgroup.id, VM_TO_VPP),
        rules=out_acl_rules)
    self.secgroups[secgroup.id] = VppAcl(in_acl_idx, out_acl_idx)
    # A freshly (re)added group must not linger on the pending
    # delete list
    self.deferred_delete_secgroups.discard(secgroup.id)
def acl_delete_on_host(self, secgroup):
    """Delete the ingress and egress VPP ACLs on host for secgroup.

    Deletion may happen up front or be deferred (and retried when
    this is next called, which happens adequately often) while a
    port is still using the ACLs.

    Arguments:
    secgroup - OpenStack security group ID

    Fixes over the previous version: the loop variable no longer
    shadows the ``secgroup`` parameter, and the interface scan stops
    as soon as the ACL is known to be in use instead of dumping the
    ACLs of every remaining interface.
    """
    # Attempt both the requested ACL and any previously deferred ones
    self.deferred_delete_secgroups.add(secgroup)
    remaining_secgroups = set()
    for sg in self.deferred_delete_secgroups:
        try:
            secgroup_acls = self.secgroups[sg]
        except KeyError:
            LOG.error("secgroup_watcher: received request to delete "
                      "an unknown security group %s", sg)
            # Unknown group: nothing to delete, don't re-defer it
            continue
        try:
            used = False
            for iface in self.vpp.get_interfaces():
                in_acls, out_acls = self.vpp.get_interface_acls(
                    iface['sw_if_idx'])
                for acl_idx in secgroup_acls:
                    if acl_idx in in_acls or acl_idx in out_acls:
                        used = True
                        break
                if used:
                    # One referencing interface is enough; stop the
                    # scan early.
                    break
            if used:
                LOG.debug('deferring delete of acls for secgroup %s'
                          ' as a port is using them', sg)
                remaining_secgroups.add(sg)
            else:
                for acl_idx in secgroup_acls:
                    self.vpp.acl_delete(acl_index=acl_idx)
                del self.secgroups[sg]
                # Discard the security group from the remote group dict
                for remote_group in self.remote_group_secgroups:
                    self.remote_group_secgroups[
                        remote_group].discard(sg)
        except Exception as e:
            LOG.exception("Exception while deleting ACL %s", e)
            # We could defer this again but it's probably better
            # we move on. Orphaned ACLs are not the end of the world.
            remaining_secgroups.add(sg)
    self.deferred_delete_secgroups = remaining_secgroups
def populate_secgroup_acl_mappings(self):
    """Rebuild self.secgroups from the tagged ACLs found in VPP.

    Dumps the tagged ACLs, decodes the tags in our two formats
    (secgroup tags and the common anti-spoof tag) and rebuilds
    self.secgroups = {secgroup_id: VppAcl(in_idx, out_idx)}.
    """
    LOG.debug("Populating secgroup to VPP ACL map..")
    # Start from a clean slate
    self.secgroups = {}
    # acl_map looks like:
    # {'net-vpp.secgroup:<uuid>.from-vpp': acl_idx,
    #  'net-vpp.secgroup:<uuid>.to-vpp': acl_idx,
    #  'net-vpp.common_spoof.from-vpp': acl_idx}
    acl_map = self.get_secgroup_acl_map()
    for tag, acl_idx in acl_map.items():
        # decode_* helpers return None when the tag isn't in the
        # corresponding format (other agents may set tags too)
        secgroup_id, direction = decode_secgroup_tag(tag)
        if secgroup_id is None:
            # Perhaps it's the common anti-spoof ACL tag instead
            direction = decode_common_spoof_tag(tag)
            if direction is None:
                # Neither a secgroup nor a spoof tag, so it wasn't
                # installed by the mechdriver at all - skip it
                continue
            secgroup_id = COMMON_SPOOF_TAG
        ingress = direction == VPP_TO_VM
        # The first tag of a pair seeds the entry; the second tag for
        # the same group fills in the other index.
        vpp_acl = self.secgroups.get(secgroup_id,
                                     VppAcl(0xffffffff, 0xffffffff))
        if ingress:
            self.secgroups[secgroup_id] = vpp_acl._replace(
                in_idx=acl_idx)
        else:
            self.secgroups[secgroup_id] = vpp_acl._replace(
                out_idx=acl_idx)
    if self.secgroups == {}:
        LOG.debug("We recovered an empty secgroups "
                  "to acl mapping. Possible reason: vpp "
                  "may have been restarted on host.")
    # py3 note: keys() returns a view, not a list, which suits our
    # callers; wrap in list() if materialisation is ever needed.
    return self.secgroups.keys()
def get_secgroup_acl_map(self):
    """Read VPP ACL tag data and build a {secgroup_tag: acl_idx} map.

    Only tags matching one of our two formats (secgroup tags and the
    common spoof tag) are included.  Not all ACLs have tags, but ACLs
    we own will have them and they will be decodeable; externally
    created ACLs are ignored - they're not our problem.

    Fix: the local previously named ``id`` shadowed the builtin; it
    is now ``secgroup_id``.
    """
    acl_map = {}
    try:
        for acl_index, tag in self.vpp.get_acl_tags():
            # TODO(ijw): identify that this is one of our tags
            secgroup_id, direction = decode_secgroup_tag(tag)
            if secgroup_id is not None:
                acl_map[tag] = acl_index
            else:
                direction = decode_common_spoof_tag(tag)
                if direction is not None:
                    acl_map[tag] = acl_index
    except Exception:
        LOG.exception("Exception getting acl_map from vpp acl tags")
        raise
    return acl_map
def maybe_set_acls_on_port(self, secgroup_ids, sw_if_index):
    """Program the security groups' ACLs on the port if all are known.

    Arguments:
    secgroup_ids - OpenStack Security Group IDs
    sw_if_index - VPP software interface index on which the ACLs will
                  be set

    Looks each secgroup up in the secgroups-to-ACL mapping; if every
    group resolves to a pair of valid ACL indexes the port is
    programmed and True is returned.  Returns False when any group is
    missing or still carries an invalid (0xffffffff) index - this
    happens mostly after an agent restart while the secgroup watcher
    thread is still repopulating the mapping; the port and secgroup
    threads are independent, so it can happen at any moment.

    Fix: the LOG.debug call used eager ``%`` formatting; it now uses
    lazy %-args, consistent with the rest of the file, so the string
    is only built when debug logging is enabled.
    """
    # A list of VppAcl namedtuples to be set on the port
    vpp_acls = []
    for secgroup_id in secgroup_ids:
        acl = self.secgroups.get(secgroup_id)
        # If either index of the pair is invalid, wait for a valid acl
        if (not acl or
                acl.in_idx == 0xFFFFFFFF or
                acl.out_idx == 0xFFFFFFFF):
            LOG.debug("Still waiting for a valid vpp acl "
                      "corresponding to secgroup %s", secgroup_id)
            return False
        vpp_acls.append(acl)
    self._set_acls_on_vpp_port(vpp_acls, sw_if_index)
    return True
def _set_acls_on_vpp_port(self, vpp_acls, sw_if_index):
    """Build a vector of VPP ACLs and program it onto a port.

    Arguments:
    vpp_acls - list of VppAcl(in_idx, out_idx) namedtuples to apply.
               An empty list removes all user-defined ACLs from the
               interface, retaining only the anti-spoof ACL.
    sw_if_index - VPP interface to program
    """
    # The anti-spoof pair always leads the vectors.  Note the
    # inversion: VPP's "input" direction carries traffic leaving the
    # VM, and vice versa.
    spoof_acl = self.spoof_filter_on_host()
    input_acls = [spoof_acl.out_idx]
    output_acls = [spoof_acl.in_idx]
    for acl in vpp_acls:
        input_acls.append(acl.out_idx)    # in on vpp == out on vm
        output_acls.append(acl.in_idx)    # out on vpp == in on vm
    # (najoy) We simply record the applied vector per port; repeat
    # application is not checked for.
    self.vpp.set_acl_list_on_interface(sw_if_index,
                                       input_acls, output_acls)
    self.port_vpp_acls[sw_if_index]['l34'] = input_acls + output_acls
def set_mac_ip_acl_on_vpp_port(self, mac_ips, sw_if_index):
    """Apply a MAC/IP (L2/L3 anti-spoof) filter to a VPP port.

    Arguments:
    mac_ips - list of (mac_address, ip_address) tuples; the IP may be
              an address or a network/prefix
    sw_if_index - Software index ID of the VPP port
    """
    def _pack_mac(mac_address):
        """Pack a colon-separated MAC string into 6 raw bytes."""
        return binascii.unhexlify(mac_address.replace(':', ''))

    full_mask = _pack_mac('FF:FF:FF:FF:FF:FF')
    # Build the structs the VPP macip API accepts (packed addresses)
    # TODO(ijw): is ip ever an interface rather than a network address?
    mac_ip_rules = [{'is_permit': 1,
                     'src_mac': _pack_mac(mac),
                     'src_mac_mask': full_mask,
                     'src_prefix': ip_network(ip)}
                    for mac, ip in mac_ips]
    # Remember any macip ACL already on the port so the old one can be
    # deleted once the replacement is attached
    try:
        previous_acl = self.port_vpp_acls[sw_if_index]['l23']
    except KeyError:
        previous_acl = None  # no existing macip ACL on the interface
    acl_index = self.vpp.macip_acl_add(rules=mac_ip_rules,
                                       count=len(mac_ip_rules))
    self.vpp.set_macip_acl_on_interface(sw_if_index=sw_if_index,
                                        acl_index=acl_index,
                                        )
    if previous_acl:
        self.vpp.delete_macip_acl(acl_index=previous_acl)
    self.port_vpp_acls[sw_if_index]['l23'] = acl_index
def remove_acls_on_port(self, sw_if_index):
    """Strip all security-group (L3/L4) ACLs from a VPP port.

    Arguments:
    sw_if_index - Software index of the port to clean up

    ACLs we applied are tracked in port_vpp_acls; a KeyError there
    means we know of no ACLs on the port, so it is logged and
    ignored.
    """
    try:
        self.vpp.delete_acl_list_on_interface(sw_if_index)
        del self.port_vpp_acls[sw_if_index]['l34']
    except KeyError:
        LOG.debug("No Layer3 ACLs are set on interface %s.. nothing "
                  "to delete", sw_if_index)
def remove_mac_ip_acl_on_port(self, sw_if_index):
    """Strip the MAC/IP ACL from a VPP port.

    This ACL implements anti-spoof and allowed-address-pair
    filtering.

    Arguments:
    sw_if_index - Software index of the port to clean up
    """
    try:
        l2_acl_index = self.port_vpp_acls[sw_if_index]['l23']
    except KeyError:
        # We know of no macip ACL on this port - nothing to do
        LOG.debug("No mac_ip ACLs are set on interface %s.. nothing "
                  "to delete", sw_if_index)
        return
    self.vpp.delete_macip_acl_on_interface(sw_if_index, l2_acl_index)
    self.vpp.delete_macip_acl(l2_acl_index)
    del self.port_vpp_acls[sw_if_index]['l23']
def spoof_filter_on_host(self):
    """Ensure the shared anti-spoof ACL pair exists in VPP.

    The pair is identified by the common spoof tag.  The rules are
    (re)applied on every call for good measure, so the correct
    anti-spoof behaviour holds whether or not the ACLs already
    existed.

    Return: VppAcl(in_idx, out_idx)
    """
    # Do we already know of a deployed spoof filter?
    spoof_acl = self.secgroups.get(COMMON_SPOOF_TAG)
    if spoof_acl:
        in_acl_idx, out_acl_idx = spoof_acl.in_idx, spoof_acl.out_idx
    else:
        # 0xffffffff asks VPP to allocate fresh ACLs
        in_acl_idx = out_acl_idx = 0xffffffff
    spoof_filter_rules = self.get_spoof_filter_rules()
    in_acl_idx = self.vpp.acl_add_replace(
        acl_index=in_acl_idx,
        tag=common_spoof_tag(VPP_TO_VM),
        rules=spoof_filter_rules['ingress'])
    out_acl_idx = self.vpp.acl_add_replace(
        acl_index=out_acl_idx,
        tag=common_spoof_tag(VM_TO_VPP),
        rules=spoof_filter_rules['egress'])
    # Record the pair the first time we get valid indexes back
    if (in_acl_idx != 0xFFFFFFFF
            and out_acl_idx != 0xFFFFFFFF and not spoof_acl):
        spoof_acl = VppAcl(in_acl_idx, out_acl_idx)
        self.secgroups[COMMON_SPOOF_TAG] = spoof_acl
    return spoof_acl
def _pack_address(self, ip_addr):
"""Pack an IPv4 or IPv6 (ip_addr or ip_network) into binary.
If the argument is an ip_address, it is packed and if the argument is
an ip_network only the network portion of it is packed
Arguments:-
ip_addr: an IPv4 or IPv6 address without a prefix_length e.g. 1.1.1.1
(or)
an IPv4 or IPv6 network with prefix_length e.g. 1.1.1.0/24
"""
# Works for both addresses and the net address of masked networks
return ip_network(ip_addr).network_address.packed
def _get_snat_indexes(self, floatingip_dict):
    """Return (internal, external) BVI loopback indexes for SNAT.

    Ensures the internal network, the external network and their
    bridge-domain BVI loopbacks all exist before returning the two
    loopback interface indexes; returns (None, None) on failure.
    """
    # Ensure/fetch both networks up front
    internal_net = self.net_driver.ensure_network(
        floatingip_dict['internal_physnet'],
        floatingip_dict['internal_net_type'],
        floatingip_dict['internal_segmentation_id'])
    external_net = self.net_driver.ensure_network(
        floatingip_dict['external_physnet'],
        floatingip_dict['external_net_type'],
        floatingip_dict['external_segmentation_id'])
    if not (internal_net and external_net):
        LOG.error('Failed to ensure network on host while setting SNAT')
        return None, None
    # Each bridge domain's BVI loopback fronts the SNAT traffic
    return (self.ensure_bridge_bvi(internal_net['bridge_domain_id']),
            self.ensure_bridge_bvi(external_net['bridge_domain_id']))
def _delete_external_subinterface(self, floatingip_dict):
    """Tear down the external network uplink if it is now unused.

    Deletion happens only when the external network still exists and
    its uplink interface carries no remaining IP addresses.
    """
    physnet = floatingip_dict['external_physnet']
    net_type = floatingip_dict['external_net_type']
    seg_id = floatingip_dict['external_segmentation_id']
    net_data = self.net_driver.get_network(physnet, net_type, seg_id)
    if not net_data:
        return
    # Only delete once no addresses are left on the uplink
    remaining_addrs = self.vpp.get_interface_ip_addresses(
        net_data['if_uplink_idx'])
    if not remaining_addrs:
        self.net_driver.delete_network(physnet, net_type, seg_id)
def _ensure_external_vlan_subif(self, if_name, if_idx, seg_id):
    """Return an admin-up VLAN sub-interface, creating it if absent."""
    # Reuse an existing subif when present, otherwise create one
    sub_if = (self.vpp.get_vlan_subif(if_name, seg_id) or
              self.vpp.create_vlan_subif(if_idx, seg_id))
    self.vpp.ifup(sub_if)
    return sub_if
def ensure_bridge_bvi(self,
                      bridge_idx: vpp.br_idx_t,
                      mac_address: vpp.mac_str_t = None) -> vpp.if_idx_t:
    """Return the bridge's BVI loopback, creating one if missing."""
    existing_bvi = self.vpp.get_bridge_bvi(bridge_idx)
    if existing_bvi:
        return existing_bvi
    # No BVI yet: create a loopback and attach it as the bridge's BVI
    new_bvi = self.vpp.create_loopback(mac_address)
    self.vpp.set_loopback_bridge_bvi(new_bvi, bridge_idx)
    return new_bvi
def ensure_router_interface_on_host(self, port_id, router_data):
    """Ensure a router interface on the local host.

    Creates a loopback interface and sets the bridge's BVI to the
    loopback interface to act as an L3 gateway for the network.
    For external networks, the BVI functions as an SNAT external
    interface. For updating an interface, the service plugin removes
    the old interface and then adds the new router interface. If an
    external gateway exists, ensures a local route in VPP.

    When Layer3 HA is enabled, the router interfaces are only enabled
    on the active VPP router. The standby router keeps the interface
    in an admin down state.

    :param port_id: OpenStack port UUID of the router interface
    :param router_data: dict describing the interface; external
        gateway ports carry the external_* keys and a 'gateways'
        list, internal ports carry per-subnet keys (see branches
        below).
    :returns: the VPP interface index of the BVI loopback
    """
    # The interface could be either an external_gw or an internal router
    # interface on a subnet
    # Enable SNAT by default unless it is set to False
    enable_snat = True
    # Multiple routers on a shared external subnet is supported
    # by adding local routes in VPP.
    is_local = 0  # True for local-only VPP routes.
    # Create an external interfce if the external_gateway_info key is
    # present, else create an internal interface
    if router_data.get('external_gateway_info', False):
        seg_id = router_data['external_segmentation_id']
        net_type = router_data['external_net_type']
        physnet = router_data['external_physnet']
        # External interfaces always live in the default VRF
        vrf = vpp.DEFAULT_VRF
        is_inside = False
        enable_snat = router_data['external_gateway_info']['enable_snat']
        external_gateway_ip = router_data['external_gateway_ip']
        # To support multiple IP addresses on a router port, add
        # the router to each of the subnets.
        # NOTE(review): only gateways[0] is consumed here - presumably
        # each additional subnet arrives as a separate call; confirm
        # against the service plugin.
        gateway_ip = router_data['gateways'][0][0]
        prefixlen = router_data['gateways'][0][1]
        is_ipv6 = bool(router_data['gateways'][0][2])
    else:
        seg_id = router_data['segmentation_id']
        net_type = router_data['net_type']
        physnet = router_data['physnet']
        vrf = router_data['vrf_id']
        is_inside = True
        external_gateway_ip = None
        gateway_ip = router_data['gateway_ip']
        prefixlen = router_data['prefixlen']
        is_ipv6 = bool(router_data['is_ipv6'])
    # Ensure the network exists on host and get the network data
    net_data = self.net_driver.ensure_network(physnet, net_type, seg_id)
    # Get the bridge domain id and ensure a BVI interface for it
    bridge_idx = net_data['bridge_domain_id']
    # Ensure a BVI (i.e. A loopback) for the bridge domain
    loopback_idx = self.vpp.get_bridge_bvi(bridge_idx)
    # Create a loopback BVI interface
    loopback_mac = vpp.mac_str_t(router_data['loopback_mac'])
    if loopback_idx is None:
        # Create the loopback interface, but don't bring it UP yet
        loopback_idx = self.ensure_bridge_bvi(bridge_idx, loopback_mac)
    # Set the VRF for tenant BVI interfaces, if not already set
    if vrf and not self.vpp.get_interface_vrf(loopback_idx) == vrf:
        self.vpp.set_interface_vrf(loopback_idx, vrf, is_ipv6)
    # Make a best effort to set the MTU on the interface
    try:
        self.vpp.set_interface_mtu(loopback_idx, router_data['mtu'])
    except SystemExit:
        # Log error and continue, do not exit here
        LOG.error("Error setting MTU on router interface")
    ha_enabled = cfg.CONF.ml2_vpp.enable_l3_ha
    if ha_enabled:
        # Now bring up the loopback interface, if this router is the
        # ACTIVE router. Also populate the data structure
        # router_interface_states so the HA code can activate and
        # deactivate the interface
        if self.router_state:
            LOG.debug("Router HA state is ACTIVE")
            LOG.debug("Bringing UP the router intf idx: %s", loopback_idx)
            self.vpp.ifup(loopback_idx)
            self.router_interface_states[loopback_idx] = 1
        else:
            LOG.debug("Router HA state is BACKUP")
            LOG.debug("Bringing DOWN the router intf idx: %s",
                      loopback_idx)
            self.vpp.ifdown(loopback_idx)
            self.router_interface_states[loopback_idx] = 0
        LOG.debug("Current router interface states: %s",
                  self.router_interface_states)
    else:
        # No HA: the interface is always up on this node
        self.vpp.ifup(loopback_idx)
    # Set SNAT on the interface if SNAT is enabled
    # Get a list of all SNAT interfaces
    int_list = self.vpp.get_snat_interfaces()
    if loopback_idx not in int_list and enable_snat:
        self.vpp.set_snat_on_interface(loopback_idx, is_inside)
        # Set the SNAT 1:N overload on the external loopback interface
        if not is_inside:
            self.vpp.snat_overload_on_interface_address(loopback_idx)
    # Add GPE mappings for GPE type networks only on the master
    # node, if ha_enabled
    if net_type == TYPE_GPE and self.gpe is not None:
        if (ha_enabled and self.router_state) or not ha_enabled:
            self.gpe.add_local_gpe_mapping(seg_id, loopback_mac)
    # Set the gateway IP address on the BVI interface, if not already set
    addresses = self.vpp.get_interface_ip_addresses(loopback_idx)
    gw_ipif = ip_interface((gateway_ip, prefixlen,))
    # Is there another gateway ip_addr set on this external loopback?
    if not is_inside:
        # Any address other than the one we're thinking of?
        exists_gateway = any((addr for addr in addresses
                              if addr != gw_ipif))
        if exists_gateway:
            LOG.debug('A router gateway exists on the external network.'
                      'The current router gateway IP: %s will be added as '
                      'a local VPP route', str(gw_ipif))
    if gw_ipif not in addresses:
        # This address is not yet present?
        # Add a local VRF route if another external gateway exists.
        # NOTE: relies on short-circuiting - exists_gateway is only
        # bound when is_inside is False, which the first operand
        # guarantees before it is read.
        if not is_inside and exists_gateway:
            is_local = True
            ip_prefix_length = 32 if gw_ipif.version == 4 else 128
            # Add a local IP route if it doesn't exist
            self.vpp.add_ip_route(vrf=vrf,
                                  ip_address=self._pack_address(
                                      gateway_ip),
                                  prefixlen=ip_prefix_length,
                                  next_hop_address=None,
                                  next_hop_sw_if_index=None,
                                  is_ipv6=is_ipv6,
                                  is_local=is_local)
        else:
            self.vpp.set_interface_ip(loopback_idx, gw_ipif)
    # Record of everything we programmed, kept for later unbinding
    # and route exports
    router_dict = {
        'segmentation_id': seg_id,
        'physnet': physnet,
        'net_type': net_type,
        'bridge_domain_id': bridge_idx,
        'bvi_if_idx': loopback_idx,
        'gateway_ip': gateway_ip,
        'prefixlen': prefixlen,
        'is_ipv6': is_ipv6,
        'mac_address': loopback_mac,
        'is_inside': is_inside,
        'external_gateway_ip': external_gateway_ip,
        'vrf_id': vrf,
        'uplink_idx': net_data.get('if_uplink_idx'),
        'is_local': is_local
    }
    if is_inside:
        LOG.debug("Router: Created inside router port: %s",
                  router_dict)
        self.router_interfaces[port_id] = router_dict
        # Ensure that all gateway networks are exported into this
        # tenant VRF &
        # A default route exists in this VRF to the external gateway
        self.export_routes_from_tenant_vrfs(
            source_vrf=router_dict['vrf_id'])
    else:
        LOG.debug("Router: Created outside router port: %s",
                  router_dict)
        self.router_external_interfaces[port_id] = router_dict
        # TODO(onong):
        # The current VPP NAT implementation supports only one outside
        # FIB table and by default it uses table 0, ie, the default vrf.
        # So, this is a temporary workaround to tide over the limitation.
        if not is_local:
            self.default_route_in_default_vrf(router_dict)
        # Ensure that the gateway network is exported into all tenant
        # VRFs, with the correct default routes
        self.export_routes_from_tenant_vrfs(
            ext_gw_ip=router_dict['external_gateway_ip'])
    return loopback_idx
def become_master_router(self):
    """Take the ACTIVE L3 HA role: bring up all router interfaces."""
    LOG.debug("VPP becoming the master router..")
    LOG.debug("Current router interface states: %s",
              self.router_interface_states)
    for if_idx, state in self.router_interface_states.items():
        if state:
            continue  # already up
        LOG.debug("Bringing UP the router interface: %s", if_idx)
        # TODO(najoy): Bring up intf. only if not set to admin DOWN
        self.vpp.ifup(if_idx)
        self.router_interface_states[if_idx] = 1
    LOG.debug("New router interface states: %s",
              self.router_interface_states)
def become_backup_router(self):
    """Take the BACKUP L3 HA role: bring down all router interfaces."""
    LOG.debug("VPP becoming the standby router..")
    LOG.debug("Current router interface states: %s",
              self.router_interface_states)
    for if_idx, state in self.router_interface_states.items():
        if not state:
            continue  # already down
        LOG.debug("Bringing DOWN the router interface: %s", if_idx)
        self.vpp.ifdown(if_idx)
        self.router_interface_states[if_idx] = 0
    LOG.debug("New router interface states: %s",
              self.router_interface_states)
def _get_ip_network(self, gateway_ip, prefixlen):
"""Returns the IP network for the gateway in CIDR form."""
return str(ip_interface(gateway_ip + "/" + str(prefixlen)).network)
def default_route_in_default_vrf(self, router_dict, is_add=True):
    """Add or remove the 0.0.0.0/0 route towards the external gateway.

    Both the add and delete paths take identical arguments, so the
    route description is built once and dispatched to the relevant
    VPP call.
    """
    route_kwargs = dict(
        vrf=router_dict['vrf_id'],
        ip_address=self._pack_address('0.0.0.0'),
        prefixlen=0,
        next_hop_address=self._pack_address(
            router_dict['external_gateway_ip']),
        next_hop_sw_if_index=router_dict['bvi_if_idx'],
        is_ipv6=router_dict['is_ipv6'])
    if is_add:
        self.vpp.add_ip_route(**route_kwargs)
    else:
        self.vpp.delete_ip_route(**route_kwargs)
    def export_routes_from_tenant_vrfs(self, source_vrf=0, is_add=True,
                                       ext_gw_ip=None):
        """Export (or withdraw) the external gateway routes into tenant VRFs.

        The gateway network has to be exported into the tenant VRF for
        it to communicate with the outside world, and a default route to
        the external gateway IP must be installed there. The tenant
        network is likewise exported into the external VRF so return
        traffic can reach the tenant.

        If source_vrf (i.e tenant VRF) is provided,
        - Export the external gateway's IP from VRF=0 into this VRF.
        - Add a default route to the external_gateway in this VRF
        Else,
        - Export the external gateway into all tenant VRFs
        - Add a default route to the external_gateway in all tenant VRFs
        If the external gateway IP address is not provided:
        All external networks are exported into tenant VRFs

        :param source_vrf: tenant VRF id to restrict the update to (0 = all)
        :param is_add: True to install routes, False to remove them
        :param ext_gw_ip: if set, only process gateways with this IP
        """
        if source_vrf:
            LOG.debug("Router:Exporting external route into tenant VRF:%s",
                      source_vrf)
        else:
            LOG.debug("Router:Exporting external route into all tenant VRFs")
        # TODO(najoy): Check if the tenant ID matches for the gateway router
        # external interface and export only matching external routes.
        # Walk every (external port, internal port) combination; filters
        # below skip pairs that don't match source_vrf / ext_gw_ip.
        for ext_port in self.router_external_interfaces:
            gw_port = self.router_external_interfaces[ext_port]
            for int_port in self.router_interfaces.values():
                int_vrf = int_port['vrf_id']
                ext_vrf = gw_port['vrf_id']
                # If a source vrf is present only update if the VRF matches
                if source_vrf and int_vrf != source_vrf:
                    continue
                is_ipv6 = int_port['is_ipv6']
                default_gw_ip = "::" if is_ipv6 else '0.0.0.0'
                external_gateway_ip = gw_port['external_gateway_ip']
                if ext_gw_ip and external_gateway_ip != ext_gw_ip:
                    continue
                # Get the external and internal networks in the CIDR form
                ext_network = self._get_ip_network(
                    gw_port['gateway_ip'],
                    gw_port['prefixlen']
                )
                int_network = self._get_ip_network(
                    int_port['gateway_ip'],
                    int_port['prefixlen']
                )
                if is_add:
                    # Add the default route (0.0.0.0/0) to the
                    # external gateway IP addr, which is outside of VPP
                    # with the next hop sw_if_index set to the external
                    # loopback BVI address.
                    # Note: The external loopback sw_if_index and the
                    # next_hop_address is mandatory here to prevent a VPP
                    # crash - Similar to the CLI command
                    # ip route add table <int-vrf> 0.0.0.0/0 via <next-hop-ip>
                    # <next-hop-sw-indx>
                    #
                    # Note(onong): Do not set IPv6 default gateway to an IPv4
                    # external gateway
                    ext_ip = ip_address(external_gateway_ip)
                    if is_ipv6 and ext_ip.version != 6:
                        LOG.info('Not setting IPv6 default route via an IPv4'
                                 ' external gateway')
                    else:
                        self.vpp.add_ip_route(
                            vrf=int_vrf,
                            ip_address=self._pack_address(default_gw_ip),
                            prefixlen=0,
                            next_hop_address=self._pack_address(
                                external_gateway_ip),
                            next_hop_sw_if_index=gw_port['bvi_if_idx'],
                            is_ipv6=is_ipv6)
                    # Export the external gateway subnet into the tenant VRF
                    # to enable tenant traffic to flow out. Exporting is done
                    # by setting the next hop sw if index to the loopback's
                    # sw_index (i.e. BVI) on the external network
                    # CLI: ip route add table <int_vrf> <external-subnet>
                    # via <next-hop-sw-indx>
                    #
                    # Note(onong): Do not export an IPv4 external network
                    # into an IPv6 VRF.
                    ext_net = ip_network(ext_network)
                    if is_ipv6 and ext_net.version != 6:
                        LOG.info('Not exporting IPv4 external network into '
                                 'tenant\'s IPv6 VRF')
                    else:
                        self.vpp.add_ip_route(
                            vrf=int_vrf,
                            ip_address=self._pack_address(ext_network),
                            prefixlen=gw_port['prefixlen'],
                            next_hop_address=None,
                            next_hop_sw_if_index=gw_port['bvi_if_idx'],
                            is_ipv6=is_ipv6)
                    # Export the tenant network into external VRF so the
                    # gateway can route return traffic to the tenant VM from
                    # the Internet.
                    # CLI: ip route add table 0 <tenant-subnet> via
                    # <tenant-loopback-bvi>
                    #
                    # Note(onong): Do not export an IPv4 internal network
                    # into an IPv6 external VRF.
                    int_net = ip_network(int_network)
                    if is_ipv6 and int_net.version != 6:
                        LOG.info('Not exporting tenant\'s IPv4 internal '
                                 'network into IPv6 external VRF')
                    else:
                        self.vpp.add_ip_route(
                            vrf=ext_vrf,
                            ip_address=self._pack_address(int_network),
                            prefixlen=int_port['prefixlen'],
                            next_hop_address=None,
                            next_hop_sw_if_index=int_port['bvi_if_idx'],
                            is_ipv6=is_ipv6)
                else:
                    # Removal path: delete the three routes installed above
                    # (default route, exported external subnet, exported
                    # tenant subnet), in that order.
                    self.vpp.delete_ip_route(
                        vrf=int_vrf,
                        ip_address=self._pack_address(default_gw_ip),
                        prefixlen=0,
                        next_hop_address=self._pack_address(
                            external_gateway_ip),
                        next_hop_sw_if_index=gw_port['bvi_if_idx'],
                        is_ipv6=is_ipv6)
                    # Delete the exported route in tenant VRF
                    self.vpp.delete_ip_route(
                        vrf=int_vrf,
                        ip_address=self._pack_address(ext_network),
                        prefixlen=gw_port['prefixlen'],
                        next_hop_address=None,
                        next_hop_sw_if_index=gw_port['bvi_if_idx'],
                        is_ipv6=is_ipv6)
                    # Delete the exported route from the external VRF
                    self.vpp.delete_ip_route(
                        vrf=ext_vrf,
                        ip_address=self._pack_address(int_network),
                        prefixlen=int_port['prefixlen'],
                        next_hop_address=None,
                        next_hop_sw_if_index=int_port['bvi_if_idx'],
                        is_ipv6=is_ipv6)
    def delete_router_interface_on_host(self, port_id):
        """Deletes a router interface (internal or external) from the host.

        Disables SNAT, if it is set on the interface.
        Deletes a loopback interface from the host, this removes the BVI
        interface from the local bridge. Also, delete the default route and
        SNAT address for the external interface.

        :param port_id: UUID of the router port to remove
        :returns: False if the port is unknown; None otherwise
        """
        is_external = 0
        if port_id in self.router_interfaces:
            router = self.router_interfaces[port_id]
        elif port_id in self.router_external_interfaces:
            router = self.router_external_interfaces[port_id]
            is_external = 1
            ext_intf_ip = '{}/{}'.format(router['gateway_ip'],
                                         router['prefixlen'])
            # Get all local IP addresses in the external VRF belonging
            # to the same external subnet as this router.
            # Check if atleast one local_ip matches a neutron assigned
            # external IP address of the router.
            # If there's no match, there are no valid local IPs within VPP.
            local_gw_ips = [r['gateway_ip'] for
                            r in self.router_external_interfaces.values()
                            if r['is_local']]
            # While, in theory, there may be multiple IPs on an interface,
            # in practice, we only program one (and program additional
            # IPs via a local route).
            # TODO(ijw): this is a somewhat unclean way of removing IP
            # addresses attached to VPP interfaces that are in the
            # subnet of ext_intf_ip, I think.  Unclear if this is the
            # right way to do that versus remembering the interface.
            local_ip: Optional[str]
            for ip in self.vpp.get_local_ip_address(ext_intf_ip,
                                                    router['is_ipv6'],
                                                    router['vrf_id']):
                # Is the local_ip valid?
                if ip in local_gw_ips:
                    LOG.debug('Found a router external local_ip in VPP: %s',
                              ip)
                    local_ip = ip
                    break
            # For-else would mean no breaks i.e. no valid local_ips
            else:
                local_ip = None
        else:
            LOG.error("Router port:%s deletion error...port not found",
                      port_id)
            return False
        net_br_idx = router['bridge_domain_id']
        bvi_if_idx = self.vpp.get_bridge_bvi(net_br_idx)
        # NOTE: local_ip is only referenced below when is_external is
        # true, which is exactly when the elif branch above bound it.
        # If an external local route, we can safely delete it from VPP
        # Don't delete any SNAT
        if is_external and router['is_local']:
            LOG.debug("delete_router_intf: Removing the local route: %s/32",
                      router['gateway_ip'])
            prefixlen = 128 if router['is_ipv6'] else 32
            self.vpp.delete_ip_route(vrf=router['vrf_id'],
                                     ip_address=self._pack_address(
                                         router['gateway_ip']),
                                     prefixlen=prefixlen,
                                     next_hop_address=None,
                                     next_hop_sw_if_index=None,
                                     is_ipv6=router['is_ipv6'],
                                     is_local=True)
        # External router is a loopback BVI. If a local route exists,
        # replace the BVI's IP address with its IP address.
        # Don't delete the SNAT.
        elif is_external and local_ip is not None:
            LOG.debug('delete_router_intf: replacing router loopback BVI IP '
                      'address %s with the local ip address %s',
                      router['gateway_ip'], local_ip)
            # Delete the IP address from the BVI.
            if bvi_if_idx is not None:
                self.vpp.del_interface_ip(
                    bvi_if_idx,
                    ip_interface((router['gateway_ip'],
                                  router['prefixlen'],)))
            # Delete the local route
            prefixlen = 128 if router['is_ipv6'] else 32
            self.vpp.delete_ip_route(vrf=router['vrf_id'],
                                     ip_address=self._pack_address(local_ip),
                                     prefixlen=prefixlen,
                                     next_hop_address=None,
                                     next_hop_sw_if_index=None,
                                     is_ipv6=router['is_ipv6'],
                                     is_local=True)
            if bvi_if_idx is not None:
                self.vpp.set_interface_ip(
                    bvi_if_idx,
                    ip_interface((local_ip, router['prefixlen'],)))
            # Set the router external interface corresponding to the local
            # route as non-local.
            # NOTE(review): this rebinds the local name 'router' while
            # scanning; 'router' below this loop refers to the last
            # matching entry, not the port being deleted.
            for router in self.router_external_interfaces.values():
                if ip_address(router['gateway_ip']) == \
                        ip_address(local_ip):
                    router['is_local'] = 0
                    LOG.debug('Router external %s is no longer a local '
                              'route but now assigned to the BVI', router)
        else:
            # At this point, we can safely remove both the SNAT and BVI
            # loopback interfaces as no local routes exist.
            snat_interfaces = self.vpp.get_snat_interfaces()
            # Get SNAT out interfaces whose IP addrs are overloaded
            snat_out_interfaces = self.vpp.get_outside_snat_interface_indices()
            # delete SNAT if set on this interface
            if router['bvi_if_idx'] in snat_interfaces:
                LOG.debug('Router: Deleting SNAT on interface '
                          'index: %s', router['bvi_if_idx'])
                self.vpp.set_snat_on_interface(router['bvi_if_idx'],
                                               is_inside=router['is_inside'],
                                               is_add=False)
            # Delete the external 1:N SNAT and default routes in all VRFs
            # for external router interface deletion
            if not router['is_inside']:
                LOG.debug('Router: Deleting external gateway port %s for '
                          'router: %s', port_id, router)
                # Delete external snat addresses for the router
                if router['bvi_if_idx'] in snat_out_interfaces:
                    LOG.debug('Router:Removing 1:N SNAT on external interface '
                              'index: %s', router['bvi_if_idx'])
                    self.vpp.snat_overload_on_interface_address(
                        router['bvi_if_idx'],
                        is_add=False)
                # Delete all exported routes into tenant VRFs belonging to this
                # external gateway
                self.export_routes_from_tenant_vrfs(
                    ext_gw_ip=router['external_gateway_ip'], is_add=False)
                # delete the default route in the default VRF
                self.default_route_in_default_vrf(router, is_add=False)
            else:
                # Delete all exported routes from this VRF
                self.export_routes_from_tenant_vrfs(source_vrf=router[
                    'vrf_id'], is_add=False)
            # Delete the gateway IP address and the BVI interface if this is
            # the last IP address assigned on the BVI
            if bvi_if_idx is not None:
                # Get all IP's assigned to the BVI interface
                addresses = self.vpp.get_interface_ip_addresses(bvi_if_idx)
                if len(addresses) > 1:
                    # Dont' delete the BVI, only remove one IP from it
                    self.vpp.del_interface_ip(
                        bvi_if_idx,
                        ip_interface((router['gateway_ip'],
                                      router['prefixlen'],)))
                else:
                    # Last subnet assigned, delete the interface
                    self.vpp.delete_loopback(bvi_if_idx)
                    if cfg.CONF.ml2_vpp.enable_l3_ha:
                        self.router_interface_states.pop(bvi_if_idx, None)
        # Remove any local GPE mappings
        if router['net_type'] == TYPE_GPE and self.gpe is not None:
            LOG.debug('Removing local GPE mappings for router '
                      'interface: %s', port_id)
            self.gpe.delete_local_gpe_mapping(router['segmentation_id'],
                                              router['mac_address'])
        # Finally drop the port from our local bookkeeping.
        if not is_external:
            self.router_interfaces.pop(port_id)
        else:
            self.router_external_interfaces.pop(port_id)
def maybe_associate_floating_ips(self):
"""Associate any pending floating IP addresses.
We may receive a request to associate a floating
IP address, when the router BVI interfaces are not ready yet. So,
we queue such requests and do the association when the router
interfaces are ready.
"""
LOG.debug('Router: maybe associating floating IPs: %s',
self.floating_ips)
for floatingip in self.floating_ips:
if not self.floating_ips[floatingip]['state']:
fixedip_addr = self.floating_ips[
floatingip]['fixed_ip_address']
floatingip_addr = self.floating_ips[
floatingip]['floating_ip_address']
loopback_idx = self.floating_ips[floatingip]['loopback_idx']
external_idx = self.floating_ips[floatingip]['external_idx']
self._associate_floatingip(floatingip, fixedip_addr,
floatingip_addr, loopback_idx,
external_idx)
    def _associate_floatingip(self, floatingip, fixedip_addr,
                              floatingip_addr, loopback_idx, external_idx):
        """Associate the floating ip address and update state.

        Programs SNAT on the tenant loopback (inside) and external
        (outside) interfaces if not already set, then installs a 1:1
        static SNAT mapping fixed-ip <-> floating-ip in the tenant VRF.
        A no-op until the tenant VRF has been assigned to the loopback;
        maybe_associate_floating_ips() retries in that case.

        :param floatingip: UUID of the floating IP
        :param fixedip_addr: tenant-side fixed IP address (string)
        :param floatingip_addr: external floating IP address (string)
        :param loopback_idx: sw_if_index of the tenant-side BVI loopback
        :param external_idx: sw_if_index of the external interface
        """
        # It is possible that during a VPP+agent restart scenario, the tenant's
        # VRF has not been set on the loopback
        tenant_vrf = self.vpp.get_interface_vrf(loopback_idx)
        if not tenant_vrf:
            LOG.debug("Router: Tenant VRF:%s not been set yet", tenant_vrf)
            return
        else:
            LOG.debug('Router: Tenant VRF:%s, floating IP:%s and bvi_idx:%s',
                      tenant_vrf, floatingip_addr, loopback_idx)
        LOG.debug("Router: associating floatingip:%s with fixedip: %s "
                  "loopback_idx:%s, external_idx:%s", floatingip_addr,
                  fixedip_addr, loopback_idx, external_idx)
        # Enable SNAT on the interfaces only if not already enabled.
        snat_interfaces = self.vpp.get_snat_interfaces()
        if loopback_idx and loopback_idx not in snat_interfaces:
            self.vpp.set_snat_on_interface(loopback_idx)
        if external_idx and external_idx not in snat_interfaces:
            self.vpp.set_snat_on_interface(external_idx, is_inside=False)
        #
        # For different tenants mapped to different VRFs, it is quite possible
        # that the same fixed IP addr is mapped to different floating IP addrs,
        # for example:
        #
        # (192.168.10.5, FIP1, VRF1)
        # (192.168.10.5, FIP2, VRF2)
        #
        # So, we check for (localip, extip, tenenat_vrf) in VPP before creating
        # the mapping.
        (localip, extip) = (ip_address(fixedip_addr),
                            ip_address(floatingip_addr))
        add_nat_mapping = True
        for m in self.vpp.get_snat_static_mappings():
            if (localip == m.local_ip_address and
                    extip == m.external_ip_address and tenant_vrf == m.vrf_id):
                add_nat_mapping = False
        if add_nat_mapping:
            LOG.debug("Router: setting 1:1 SNAT %s:%s in tenant_vrf:%s",
                      fixedip_addr, floatingip_addr, tenant_vrf)
            self.vpp.set_snat_static_mapping(localip, extip,
                                             tenant_vrf)
            # Clear any dynamic NAT sessions for the 1:1 NAT to take effect
            self.vpp.clear_snat_sessions(localip)
            self.floating_ips[floatingip]['tenant_vrf'] = tenant_vrf
            # state=True marks this FIP as fully associated, so
            # maybe_associate_floating_ips() skips it from now on.
            self.floating_ips[floatingip]['state'] = True
            LOG.debug('Router: Associated floating IP: %s',
                      self.floating_ips[floatingip])
        else:
            LOG.debug('Router: SNAT 1:1 mapping already exists for floating'
                      'IP: %s', self.floating_ips[floatingip])
def associate_floatingip(self, floatingip, floatingip_dict):
"""Add the VPP configuration to support One-to-One SNAT.
Arguments:-
floating_ip: The UUID of the floating ip address
floatingip_dict : The floating ip data
"""
LOG.debug("Router: Checking for existing association for"
" floating ip: %s", floatingip)
if floatingip in self.floating_ips:
self.disassociate_floatingip(floatingip)
else:
LOG.debug("Router: Found no existing association for floating ip:"
" %s", floatingip)
LOG.debug('Router: Associating floating ip address: %s: %s',
floatingip, floatingip_dict)
loopback_idx, external_idx = self._get_snat_indexes(floatingip_dict)
LOG.debug('Router: Retrieved floating ip intf indxs- int:%s, ext:%s',
loopback_idx, external_idx)
self.floating_ips[floatingip] = {
'fixed_ip_address': floatingip_dict['fixed_ip_address'],
'floating_ip_address': floatingip_dict['floating_ip_address'],
'loopback_idx': loopback_idx,
'external_idx': external_idx,
'state': False
}
tenant_vrf = self.vpp.get_interface_vrf(loopback_idx)
# Associate the floating IP iff the router has established a tenant
# VRF i.e. a vrf_id > 0
if tenant_vrf:
LOG.debug("Router: associate_floating_ip: tenant_vrf:%s BVI:%s",
tenant_vrf, loopback_idx)
self.floating_ips[floatingip]['tenant_vrf'] = tenant_vrf
self._associate_floatingip(floatingip,
floatingip_dict['fixed_ip_address'],
floatingip_dict['floating_ip_address'],
loopback_idx,
external_idx)
else:
self.floating_ips[floatingip]['tenant_vrf'] = 'undecided'
def disassociate_floatingip(self, floatingip):
"""Remove the VPP configuration used by One-to-One SNAT.
Arguments:-
floating_ip: The UUID of the floating ip address to be disassociated.
"""
LOG.debug('Router: Disassociating floating ip address:%s',
floatingip)
# Check if we know about this floating ip address
floatingip_dict = self.floating_ips.get(floatingip)
if floatingip_dict:
# Delete the SNAT internal and external IP address mapping.
LOG.debug('Router: deleting NAT mappings for floating ip: %s',
floatingip)
snat_local_ipaddresses = self.vpp.get_snat_local_ipaddresses()
if floatingip_dict['fixed_ip_address'] in snat_local_ipaddresses:
self.vpp.set_snat_static_mapping(
ip_address(floatingip_dict['fixed_ip_address']),
ip_address(floatingip_dict['floating_ip_address']),
floatingip_dict['tenant_vrf'],
is_add=False)
self.floating_ips.pop(floatingip)
else:
LOG.debug('router: floating ip address: %s not found to be '
'disassociated', floatingip)
    def get_spoof_filter_rules(self):
        """Build and return a list of anti-spoofing rules.

        Returns a dict with two keys named: ingress_rules and egress_rules
        ingress_rules = a list of ingress rules
        egress_rules = a list of egress rules

        NOTE: rule ORDER matters.  VPP matches IP fragments against the
        first rule for a given address/protocol, ignoring ports and ICMP
        types, so permit rules must precede deny rules where fragments
        should be allowed (see the comments inline).
        """
        def _compose_rule(is_permit,
                          src_prefix,
                          dst_prefix,
                          proto,
                          srcport_or_icmptype_first,
                          srcport_or_icmptype_last,
                          dstport_or_icmpcode_first,
                          dstport_or_icmpcode_last):
            # Build a single ACL rule dict.  proto is the IP protocol
            # number (6=TCP, 17=UDP, 58=ICMPv6); the port/icmp fields are
            # inclusive ranges.
            # Set is_permit = 2 if reflexive_acls and tcp/udp/ip traffic
            # (2 == permit+reflect in VPP's ACL plugin; reflexive_acls is
            # a module-level flag - presumably config-driven, see top of
            # file)
            if is_permit == 1 and reflexive_acls and proto in [6, 17, 0]:
                is_permit = 2
            return {
                'is_permit': is_permit,
                'src_prefix': ip_network(src_prefix),
                'dst_prefix': ip_network(dst_prefix),
                'proto': proto,
                'srcport_or_icmptype_first': srcport_or_icmptype_first,
                'srcport_or_icmptype_last': srcport_or_icmptype_last,
                'dstport_or_icmpcode_first': dstport_or_icmpcode_first,
                'dstport_or_icmpcode_last': dstport_or_icmpcode_last
            }
        # Ingress filter rules to allow DHCP and ICMPv6 into VM
        # Allow incoming DHCP offer packets from dhcp servers
        # UDP src_port 67 (ipv4 dhcp server) and dst_port 68 (dhclient)
        # UDP src_port 547 (ipv6 dhserver) and dst_port 546 (ipv6 dclient)
        ingress_rules = [
            _compose_rule(1, '0.0.0.0/0', '0.0.0.0/0',
                          17, 67, 67, 68, 68),
            _compose_rule(1, '::/0', '::/0',
                          17, 547, 547, 546, 546),
        ]
        # Allow Icmpv6 Multicast listener Query, Report, Done (130,131,132)
        # neighbor soliciation (135) and neighbor advertisement (136) and
        # MLD2_REPORT (143) and ICMP_RA into the Instance
        ICMP_RA = n_const.ICMPV6_TYPE_RA
        for ICMP_TYPE in [130, 131, 132, 135, 136, 143, ICMP_RA]:
            ingress_rules.append(
                _compose_rule(1, '::/0', '::/0',
                              58, ICMP_TYPE, ICMP_TYPE, 0, 255)
            )
        # Egress spoof_filter rules from VM
        # Permit DHCP client packets (discovery + request)
        # UDP src_port 68 (ipv4 client) and dst_port 67 (ipv4 dhcp server)
        # UDP src_port 546 (ipv6 client) and dst_port 547 (ipv6 dhcp server)
        # Drop DHCP Offer packets originating from VM
        # src_port 67 and dst_port 68
        # src_port 547 and dst_port 546
        # Drop icmpv6 Router Advertisements from VMs.
        # Allow other outgoing icmpv6 packets
        # When packets are fragmented (as UCP(v6), ICMP(v6) and TCPv4 packets
        # all can be, VPP will match any fragment against the first rule
        # relating to that address and protocol. It ignores things like ports
        # and ICMP types because they aren't in the second and later fragments.
        #
        # If you want second and later fragments to get through, the first rule
        # that matches them *must* be a 'permit' rule.
        #
        # In our case it only happens for ICMPv6; we add a permit rule on an
        # invalid code to pre-empt the RA deny when matching fragments.
        # For TCPv4/v6, and ICMPv4, we don't have deny rules in spoof SG. so we
        # are good;
        # For UDPv4/v6, we do have a permit rule of DHCPv4/v6, so we are good;
        # For ICMPv6, we are adding a dummy permit rule to workaround this;
        egress_rules = [
            _compose_rule(1, '0.0.0.0/0', '0.0.0.0/0',
                          17, 68, 68, 67, 67),
            _compose_rule(1, '::/0', '::/0',
                          17, 546, 546, 547, 547),
            _compose_rule(0, '0.0.0.0/0', '0.0.0.0/0',
                          17, 67, 67, 68, 68),
            _compose_rule(0, '::/0', '::/0',
                          17, 547, 547, 546, 546),
            # Permits ICMPv6 fragments while not permitting (valid)
            # packets (type 0 is invalid)
            _compose_rule(1, '::/0', '::/0',
                          58, 0, 0, 0, 0),
            # ... because this rule would otherwise match fragments, being
            # the first rule, and would deny them
            _compose_rule(0, '::/0', '::/0',
                          58, ICMP_RA, ICMP_RA, 0, 255),
            _compose_rule(1, '::/0', '::/0',
                          58, 0, 255, 0, 255),
            # Permit TCP port 80 traffic to 169.254.169.254/32 for metadata
            _compose_rule(1, '0.0.0.0/0', '169.254.169.254/32',
                          6, 0, 65535, 80, 80),
        ]
        return {'ingress': ingress_rules,
                'egress': egress_rules}
# Key prefix used for all of this driver's etcd keys
LEADIN = nvpp_const.LEADIN  # TODO(ijw): make configurable?
# TrunkWatcher thread's heartbeat interval, in seconds
# TODO(onong): make it configurable if need be
TRUNK_WATCHER_HEARTBEAT = 30
class EtcdListener(object):
    def __init__(self, host, client_factory, vppf, physnets):
        """Set up the etcd listener for this compute host.

        :param host: this agent's hostname (used in etcd key paths)
        :param client_factory: factory producing etcd client connections
        :param vppf: the VPPForwarder-style object that programs VPP
        :param physnets: mapping of physnet name -> uplink interface
        """
        self.host = host
        self.client_factory = client_factory
        self.vppf = vppf
        self.physnets = physnets
        self.pool = eventlet.GreenPool()
        self.secgroup_enabled = cfg.CONF.SECURITYGROUP.enable_security_group
        # Add GPE key-watching, if required
        self.gpe_listener: Optional[gpe.GpeListener]
        if TYPE_GPE in cfg.CONF.ml2.type_drivers:
            self.gpe_listener = gpe.GpeListener(self)
        else:
            self.gpe_listener = None
        # These data structures are used as readiness indicators.
        # A port is only in here only if the attachment part of binding
        # has completed.
        # key: ifidx of port; value: (UUID, bound-callback, vpp-prop-dict)
        self.iface_state = {}
        # key: UUID of port; value: ifidx
        self.iface_state_ifidx = {}
        # Members of this are ports requiring security groups with unsatisfied
        # requirements.
        # key: ifidx; value: list of secgroup IDs, or None for "port
        # security disabled"
        self.iface_awaiting_secgroups = {}
        # Sub-ports of a trunk with pending port bindings.
        # trunk_port ID => List(sub_ports awaiting binding)
        # When the agent is restarted, it could receive an etcd watch event
        # to bind subports even before the parent port itself is bound. This
        # dict keeps tracks of such sub_ports. They will be reconsidered
        # for binding after the parent is bound.
        self.subports_awaiting_parents = {}
        # bound subports of parent ports
        # trunk_port ID => set(bound subports)
        self.bound_subports = defaultdict(set)
        # We also need to know if the vhostuser interface has seen a socket
        # connection: this tells us there's a state change, and there is
        # a state detection function on self.vppf.
        self.vppf.vhost_ready_callback = self._vhost_ready
def unbind(self, id):
if id not in self.iface_state_ifidx:
# Unbinding an unknown port
return
if self.iface_state_ifidx[id] in self.iface_state:
del self.iface_state[self.iface_state_ifidx[id]]
del self.iface_state_ifidx[id]
self.vppf.unbind_interface_on_host(id)
    def bind(self, bound_callback, id, binding_type, mac_address, physnet,
             network_type, segmentation_id, security_data):
        """Bind an interface as instructed by ML2 on this host.

        The interface as a network and binding type.  Assuming the
        network as been dropped onto the physnet specified, bind
        that uplink to the interface in question by creating an
        interface of the appropriate form and propagating the network
        to it.

        This call also identifies if we should consider the interface
        fully up.  This may happen now, or, asynchronously, later,
        depending on whether all the prerequisites are in place.  That
        includes the behaviour of whatever's on the other end of the
        interface.

        :param bound_callback: called with (id, props) once the port is
            fully ready (possibly later, from maybe_up())
        :param id: port UUID
        :param binding_type: e.g. 'vhostuser' or 'tap'
        :param mac_address: port MAC address
        :param physnet: physical network name (ignored/replaced for GPE)
        :param network_type: segment type (vlan, gpe, ...)
        :param segmentation_id: segment id
        :param security_data: port security attributes from etcd
        :returns: None if binding failed; otherwise falls through after
            recording state
        """
        # args['binding_type'] in ('vhostuser', 'tap'):
        # For GPE, fetch remote mappings from etcd for any "new" network
        # segments we will be binding to so we are aware of all the remote
        # overlay (mac) to underlay (IP) values
        if network_type == TYPE_GPE and self.gpe_listener is not None:
            # For GPE, a physnet value is not messaged by ML2 as it
            # is not specified for creating a gpe tenant network. Hence for
            # these net types we replace the physnet with the value of
            # gpe_locators, which stand for the physnet name.
            physnet = self.gpe_listener.physnet()
            self.gpe_listener.ensure_gpe_remote_mappings(segmentation_id)
        props = self.vppf.bind_interface_on_host(binding_type,
                                                 id,
                                                 mac_address,
                                                 physnet,
                                                 network_type,
                                                 segmentation_id)
        if props is None:
            # Problems with the binding
            # We will never notify anyone this port is ready.
            return None
        # Store the binding information.  We put this into
        # etcd when the interface comes up to show that things
        # are ready and expose it to curious operators, who may
        # be able to debug with it.  This may not happen
        # immediately because the far end may not have connected.
        iface_idx = props['iface_idx']
        port_security_enabled = security_data.get('port_security_enabled',
                                                  True)
        if port_security_enabled:
            self.iface_awaiting_secgroups[iface_idx] = \
                security_data.get('security_groups', [])
        else:
            # 'None' is a special value indicating no port security
            self.iface_awaiting_secgroups[iface_idx] = None
        self.iface_state[iface_idx] = (id, bound_callback, props)
        self.iface_state_ifidx[id] = iface_idx
        # Apply the port-local security (antispoof / allowed address
        # pairs) now; secgroup ACLs may be applied later when ready.
        self.apply_spoof_macip(iface_idx, security_data, props)
        self.maybe_apply_secgroups(iface_idx)
def vpp_restart_prepare(self):
"""On a restart, find bound ports and clean up unwanted config
Does the following:
- fixes uplinks
- identifies the ports we bound previously - they may need
removing or updating
Ports intended to be bound will have .bind() called later
in the resync, which will correcly populate VPPForwarder
structures and fix bindings whose type has changed; ports
that are no longer needed will be unbound.
Returns a set of bound ports
"""
LOG.debug('Repairing physnets in VPP')
self.vppf.fix_physnets(self.physnets)
LOG.debug('VPP has been cleaned of stale physnets')
return self.vppf.find_bound_ports()
def apply_spoof_macip(self, iface_idx, security_data, props):
"""Apply non-secgroup security to a port
This is an idempotent function to set up the port security
(antispoof and allowed-address-pair) that can be determined
solely from the data on the port itself.
"""
# TODO(ijw): this is a convenience for spotting L3 and DHCP
# ports, but it's not the right way
is_secured_port = props['bind_type'] == 'vhostuser'
port_security_enabled = security_data.get('port_security_enabled',
True)
# If (security-groups and port_security)
# are enabled and it's a vhostuser port
# proceed to set L3/L2 ACLs, else skip security
if (self.secgroup_enabled and
port_security_enabled and
is_secured_port):
# Set Allowed address pairs and mac-spoof filter
aa_pairs = security_data.get('allowed_address_pairs', [])
self.set_mac_ip_acl_on_port(
security_data['mac_address'],
security_data.get('fixed_ips'),
aa_pairs,
iface_idx)
else:
self.vppf.remove_mac_ip_acl_on_port(iface_idx)
def reconsider_port_secgroups(self):
"""Check current port security state.
See if any of the ports awaiting security group ACL population can
now be secured.
"""
# TODO(ijw): could be more efficient in selecting ports to check
for iface_idx in self.iface_awaiting_secgroups.keys():
self.maybe_apply_secgroups(iface_idx)
    def maybe_apply_secgroups(self, iface_idx):
        """Apply secgroups to a port if all constructs are available.

        This is an idempotent function to set up port security.  It
        relies on the pre-existence of the ACLs corresponding to
        security groups, so it may or may not be possible to apply
        security at this moment in time.  If it is, the port is
        recorded as secure (allowing binding to complete), and if it
        isn't we will attempt to reapply as more security groups are
        created.

        It is reapplied if the security group list changes on the
        port.  It is not reapplied if the security group content is
        changed, because the ACL number remains the same and therefore
        so does the port config.

        :param iface_idx: VPP sw_if_index of the port; must be a key of
            iface_awaiting_secgroups
        """
        # None here means 'port security disabled' (see bind())
        secgroup_ids = self.iface_awaiting_secgroups[iface_idx]
        try:
            (id, bound_callback, props) = self.iface_state[iface_idx]
        except KeyError:  # The port was unbound before we could apply ACLs
            LOG.info("Interface idx %s unbound before "
                     "security-group(s) could be applied", iface_idx)
            self.iface_awaiting_secgroups.pop(iface_idx, None)
            return
        # TODO(ijw): this is a convenience for spotting L3 and DHCP
        # ports, but it's not the right way
        # (TODO(ijw) it's also the only reason we go to iface_state)
        is_secured_port = props['bind_type'] == 'vhostuser'
        # If security-groups are enabled and it's a port needing
        # security proceed to set L3/L2 ACLs, else skip security.
        # If security-groups are empty, apply the default spoof-acls.
        # This is the correct behavior when security-groups are enabled but
        # not set on a port.
        if (self.secgroup_enabled and
                secgroup_ids is not None and  # port security off
                is_secured_port):
            if not self.vppf.maybe_set_acls_on_port(
                    secgroup_ids,
                    iface_idx):
                # The ACLs for secgroups are not yet ready
                # Leave ourselves in the pending list; a later
                # reconsider_port_secgroups() call will retry.
                return
        else:
            LOG.debug("Clearing port_security on "
                      "port %s", id)
            self.vppf.remove_acls_on_port(
                iface_idx)
        # Remove with no error if not present
        self.iface_awaiting_secgroups.pop(iface_idx, None)
        # Security is resolved, so the port may now be ready to go up.
        self.maybe_up(iface_idx)
def _vhost_ready(self, id):
# The callback from VPP only knows the IP; convert
# .. and note that we may not know the conversion
iface_idx = self.iface_state_ifidx.get(id)
if iface_idx is None:
# Not a port we know about
return
self.maybe_up(iface_idx)
def maybe_up(self, iface_idx):
"""Flag that an interface is connected, if it is
This is a combination of 'we did our bit' and 'the other
end connected'. These can happen in either order; if
we resync, we recheck our binding but the other end
may have connected already.
This both tells Nova the interface is ready and brings the
interface up in VPP.
There is nothing wrong (other than a bit of inefficiency)
in sending this to Nova multiple times; the watching driver may
see the key write multiple times and will act accordingly.
"""
if iface_idx not in self.iface_state:
# Binding hasn't completed
return
(id, bound_callback, props) = self.iface_state[iface_idx]
# For trunk sub-ports, it's the parent vhostuser port that needs to
# be linked up
if 'parent_uuid' in props:
port_id = props['parent_uuid']
else:
port_id = id
if (props['bind_type'] == 'vhostuser' and
not self.vppf.vhostuser_linked_up(port_id)):
# vhostuser connection that hasn't yet found a friend
return
if iface_idx in self.iface_awaiting_secgroups:
return
LOG.debug('marking index %s as ready', id)
self.vppf.ifup(iface_idx)
bound_callback(id, props)
    def acl_add_replace(self, secgroup, data):
        """Add or replace a VPP ACL for a security group.

        Expands each Neutron rule into one SecurityGroupRule per remote
        IP prefix (a single prefix for CIDR rules, or one per member-IP
        of a remote group), then programs the resulting ACL in VPP.

        :param secgroup: OpenStack SecurityGroup ID
        :param data: SecurityGroup data from etcd (dicts of
            ingress_rules / egress_rules)
        """
        def _secgroup_rule(r):
            # Expand one Neutron rule 'r' into a list of
            # SecurityGroupRule objects, one per remote IP prefix.
            # Create a rule for the remote_ip_prefix (CIDR) value
            if r['remote_ip_addr']:
                remote_ip_prefixes = [(r['remote_ip_addr'],
                                       r['ip_prefix_len'])]
            # Create a rule for each ip address in the remote_group
            else:
                remote_group = r['remote_group_id']
                prefix_length = 128 if r['is_ipv6'] else 32
                ip_version = 6 if r['is_ipv6'] else 4
                # Add the referencing secgroup ID to the remote-group lookup
                # data set. This enables the RemoteGroupWatcher thread to
                # lookup the secgroups that need to be updated for a
                # remote-group etcd watch event
                self.vppf.remote_group_secgroups[remote_group].add(secgroup)
                remote_ip_prefixes = [
                    (ip, prefix_length) for port in
                    self.vppf.remote_group_ports[remote_group]
                    for ip in self.vppf.port_ips[port]
                    if ip_network(ip).version == ip_version]
                LOG.debug("remote_group: vppf.remote_group_ports:%s",
                          self.vppf.remote_group_ports
                          )
                LOG.debug("remote_group: vppf.port_ips:%s",
                          self.vppf.port_ips)
                LOG.debug("remote_group_ip_prefixes:%s for group %s",
                          remote_ip_prefixes, remote_group)
                LOG.debug("remote_group_secgroups: %s",
                          self.vppf.remote_group_secgroups)
            # VPP API requires the IP addresses to be represented in binary
            # At this point:
            # 1. we convert to the form VPP likes - a packed address
            # 2. we fix up the rule.  At this point it's what Neutron gave us
            # and Neutron doesn't strictly check that the rule is a network
            # address compatible with the mask, but VPP cares.  Our assumption
            # is that only bits significant relative to the mask are intended
            # to matter, though that's ill-defined in the Neutron API.
            rules = []
            for ip_addr, ip_prefix_len in remote_ip_prefixes:
                # OpenStack should provide a network address here, but
                # doesn't correctly validate input.
                net = ip_interface((ip_addr, int(ip_prefix_len),)).network
                packed_addr = net.network_address.packed
                rules.append(SecurityGroupRule(r['is_ipv6'],
                                               packed_addr,
                                               ip_prefix_len,
                                               r.get('remote_group_id', None),
                                               r['protocol'],
                                               r['port_min'],
                                               r['port_max']))
            return rules
        ingress_rules, egress_rules = (
            [_secgroup_rule(r) for r in data['ingress_rules']],
            [_secgroup_rule(r) for r in data['egress_rules']]
        )
        # Flatten ingress and egress rules (each _secgroup_rule call
        # returned a list)
        ingress_rules, egress_rules = (
            [rule for rule_list in ingress_rules for rule in rule_list],
            [rule for rule_list in egress_rules for rule in rule_list]
        )
        LOG.debug("remote_group: sec_group: %s, ingress rules: %s "
                  "egress_rules: %s", secgroup, ingress_rules, egress_rules)
        self.vppf.acl_add_replace_on_host(SecurityGroup(secgroup,
                                                        ingress_rules,
                                                        egress_rules))
def acl_delete(self, secgroup):
"""Delete ACL on host.
Arguments:
secgroup - OpenStack SecurityGroup ID
"""
self.vppf.acl_delete_on_host(secgroup)
def spoof_filter_on_host(self):
"""Deploy anti-spoofing ingress and egress ACLs on VPP.
Tag ingress spoof acl on VPP with ID: FFFF:0
Tag egress spoof acl on VPP with ID: FFFF:1
Add Spoof ACL mapping with Key: "FFFF"
Val: VppAcl(in_idx, out_idx)
to secgroups mapping
"""
self.vppf.spoof_filter_on_host()
def set_mac_ip_acl_on_port(self, mac_address, fixed_ips,
allowed_address_pairs, sw_if_index):
"""Set L2/L3 ACLs on port.
Arguments:-
mac_address - The mac_address assigned to the port
fixed_ips - A list of dictionaries containing the fixed_ips
assigned to the port identified by the key - 'ip_address'
allowed_address_pairs - A list of allowed address pair attributes
- Each address pair is a dict with
keys: ip_address (required)
mac_address (optional)
sw_if_index - VPP vhostuser if_idx
"""
# Allowed mac_ip list to permit for DHCP request from 0.0.0.0
# Allow Ipv6 link local address for neighbor discovery
# mac-ip-acls are egress only ACLs from an instance
lla_address = str(netutils.get_ipv6_addr_by_EUI64(
n_const.IPv6_LLA_PREFIX, mac_address))
allowed_mac_ips = [(mac_address, u'0.0.0.0'),
(mac_address, lla_address)]
# A list of tuples of MAC Addrs. and their corresponding IP Addrs.
fixed_ip_addrs = [ip['ip_address'] for ip in fixed_ips]
mac_ips = [(mac_address, ip_address) for ip_address
in fixed_ip_addrs]
# use the port-mac if a mac_address is not present in the allowed
# address pair
addr_pairs = [(p.get('mac_address', mac_address), p['ip_address'])
for p in allowed_address_pairs]
mac_ips = allowed_mac_ips + mac_ips + addr_pairs
self.vppf.set_mac_ip_acl_on_vpp_port(mac_ips, sw_if_index)
def load_macip_acl_mapping(self) -> None:
"""Load the sw_if_index to mac_ip_acl index mappings on vpp.
Populates self.vppf.port_vpp_acls :
{sw_if_index -> {'l23' : <macip_acl_index>}}
"""
try:
macip_acls = self.vppf.vpp.get_macip_acls()
# The acl position is the sw_if_index
for sw_if_index, acl_index in enumerate(macip_acls):
if acl_index != 4294967295: # Exclude invalid acl index
self.vppf.port_vpp_acls[sw_if_index]['l23'] = acl_index
except ValueError:
pass # vpp_papi throws this error when no ACLs exist
except AttributeError:
pass # cannot reference acl attribute - pass and exit
    def update_remote_group_secgroups(self, remote_group):
        """Update the ACLs of all security groups that use a remote-group.
        When a remote_group to port association is changed,
        i.e. A new port is associated with (or) an existing port is removed,
        the agent needs to update the VPP ACLs belonging to all the
        security groups that use this remote-group in their rules.
        Since this is called from various threads it makes a new etcd
        client each call.
        """
        # Secgroups whose rules reference this remote-group (maintained
        # as a reverse index in vppf by the acl_add_replace path).
        secgroups = self.vppf.remote_group_secgroups[remote_group]
        LOG.debug("Updating secgroups:%s referencing the remote_group:%s",
                  secgroups, remote_group)
        # Fresh client per call: clients are not shared across threads.
        etcd_client = self.client_factory.client()
        etcd_writer = etcdutils.json_writer(etcd_client)
        for secgroup in secgroups:
            secgroup_key = self.secgroup_key_space + "/%s" % secgroup
            # TODO(najoy):Update to the new per thread etcd-client model
            # TODO(ijw): all keys really present?
            # If the security group is deleted before the agent gets to it,
            # handle the exception.
            try:
                data = etcd_writer.read(secgroup_key).value
                LOG.debug("Updating remote_group rules %s for secgroup %s",
                          data, secgroup)
                # Recompute the secgroup's rules against the remote
                # group's current port IPs and atomically replace the
                # VPP ACL.
                self.acl_add_replace(secgroup, data)
            except etcd.EtcdKeyNotFound:
                # Secgroup vanished under us; nothing to update.
                pass
# EtcdListener Trunking section
    def reconsider_trunk_subports(self):
        """Try to bind subports awaiting their parent port to be bound.
        If the parent port
        - is bound
        - instance has connected to the other end of the vhostuser
        - security groups has been applied
        - is in admin UP state
        then:
        - bind the subports, and
        - set subport state to admin UP
        """
        # Get the list of *currently* awaiting subports
        # (allows us to change and clear up the dict as we go through them)
        awaiting_subports = list(self.subports_awaiting_parents.items())
        for parent_port, subports in awaiting_subports:
            LOG.debug('reconsidering bind for trunk subports %s, parent %s',
                      subports, parent_port)
            # vppf.interfaces holds per-port binding props once bound.
            props = self.vppf.interfaces.get(parent_port, None)
            # Make sure parent port is really ready: bound (props),
            # tracked in iface_state, vhostuser link is up, and no
            # secgroup application still pending on it.
            if (props and props['iface_idx'] in self.iface_state and
                    self.vppf.vhostuser_linked_up(parent_port) and
                    props['iface_idx'] not in self.iface_awaiting_secgroups):
                LOG.debug("Parent trunk port vhostuser ifidx %s is ready",
                          props['iface_idx'])
                self.bind_unbind_subports(parent_port, subports)
                # Done waiting - remove from the pending dict.
                self.subports_awaiting_parents.pop(parent_port)
            else:
                # Leave the entry in place; do_tick() will retry later.
                LOG.debug("Parent trunk port is not ready")
def subports_to_unbind(self, parent_port, subports):
"""Return a list of subports to unbind for a parent port.
subports :- A set of subports that need to be currently bound
to the parent port.
"""
# unbind 'bound sub-ports' that are not in the current subports
return self.bound_subports[parent_port] - subports
def subports_to_bind(self, parent_port, subports):
"""Return a list of subports to unbind for a parent port.
subports :- A set of subports that need to be currently bound
to the parent port.
"""
# remove ports from subports that are already bound and only bind the
# new ports.
return subports - self.bound_subports[parent_port]
    def bind_unbind_subports(self, parent_port, subports):
        """Bind or unbind the subports of the parent ports as needed.
        To unbind all bound subports of a parent port, provide the
        parent_port argument with subports set to an empty list.
        Sample subports data structure: List of dicts
        [{"segmentation_id": 11,
          "uplink_seg_id": 149,
          "segmentation_type": "vlan",
          "uplink_seg_type": "vlan",
          "port_id": "9ee91c37-9150-49ff-9ea7-48e98547771a",
          "physnet": "physnet1",
          "allowed_address_pairs": [],
          "port_security_enabled": true,
          "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"],
          "bound_callback" : bind_notifier_object,
          },
         {"segmentation_id": 12,
          "uplink_seg_id": 139,
          "segmentation_type": "vlan",
          "uplink_seg_type": "vlan",
          "port_id": "2b1a89ba-78f1-4350-b71a-7caf7f23cbcf",
          "physnet": "physnet1",
          "allowed_address_pairs": [],
          "port_security_enabled": true,
          "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"],
          "bound_callback" : bind_notifier_object,
          }
         ]
        """
        LOG.debug('Binding or Unbinding subports %s of parent trunk port %s',
                  subports, parent_port)
        # Work with the set of subport IDs; diff against what we have
        # bound to derive the bind and unbind work lists.
        subport_set = set([p['port_id'] for p in subports])
        subports_to_bind = self.subports_to_bind(parent_port, subport_set)
        LOG.debug('Binding subports %s of a parent trunk port %s',
                  subports_to_bind, parent_port)
        subports_to_unbind = self.subports_to_unbind(parent_port,
                                                     subport_set)
        LOG.debug('Unbinding subports %s of a parent trunk port %s',
                  subports_to_unbind, parent_port)
        # bind subports we are told to bind
        for subport in subports_to_bind:
            # Recover the full dict for this subport ID.
            subport_data = [p for p in subports
                            if p['port_id'] == subport][0]
            LOG.debug('Binding subport %s of parent trunk port %s '
                      'sub_port_data %s',
                      subport, parent_port, subport_data)
            props = self.vppf.bind_subport_on_host(parent_port, subport_data)
            # Bring up the subport
            if props:
                # Record the binding, then set admin-up in VPP.
                self.bound_subports[parent_port].add(subport)
                subport_iface_idx = props['iface_idx']
                LOG.debug("Bringing up the trunk subport vhost ifidx %s",
                          subport_iface_idx)
                self.vppf.ifup(subport_iface_idx)
                # Set port security on subport
                LOG.debug("Setting port security on trunk subport ifidx %s",
                          subport_iface_idx)
                port_security_enabled = subport_data.get(
                    'port_security_enabled',
                    True)
                # None signals "port security disabled" downstream;
                # a (possibly empty) list means "apply these secgroups".
                if port_security_enabled:
                    self.iface_awaiting_secgroups[subport_iface_idx] = \
                        subport_data.get('security_groups', [])
                else:
                    self.iface_awaiting_secgroups[subport_iface_idx] = None
                id = subport_data['port_id']
                # Track state so reconsider/notification paths can find
                # the port and its bound-callback by ifidx and by ID.
                self.iface_state[subport_iface_idx] = (
                    id,
                    subport_data['bound_callback'],
                    props
                )
                self.iface_state_ifidx[id] = subport_iface_idx
                self.apply_spoof_macip(subport_iface_idx, subport_data, props)
                self.maybe_apply_secgroups(subport_iface_idx)
        # unbind subports we are told to unbind
        for subport in subports_to_unbind:
            LOG.debug('Unbinding subport %s of parent_port %s',
                      subport, parent_port)
            # Drop the tracking state before removing from VPP.
            if self.iface_state_ifidx[subport] in self.iface_state:
                del self.iface_state[self.iface_state_ifidx[subport]]
            del self.iface_state_ifidx[subport]
            self.vppf.unbind_subport_on_host(subport)
            self.bound_subports[parent_port].remove(subport)
AGENT_HEARTBEAT = 60 # seconds
    def process_ops(self):
        """Set up etcd keyspaces, spawn all watcher threads and block.

        This is the agent's main loop entry: it prepares the etcd
        directories this host reads and writes, announces its physnets,
        then spawns the secgroup/remote-group/port/trunk/GPE/router
        watchers and waits on them forever.
        """
        # TODO(ijw): needs to remember its last tick on reboot, or
        # reconfigure from start (which means that VPP needs it
        # storing, so it's lost on reboot of VPP)
        # Per-host keyspaces this agent consumes or publishes.
        self.port_key_space = LEADIN + "/nodes/%s/ports" % self.host
        self.router_key_space = LEADIN + "/nodes/%s/routers" % self.host
        self.secgroup_key_space = LEADIN + "/global/secgroups"
        self.state_key_space = LEADIN + "/state/%s/ports" % self.host
        self.physnet_key_space = LEADIN + "/state/%s/physnets" % self.host
        self.remote_group_key_space = LEADIN + "/global/remote_group"
        self.trunk_key_space = LEADIN + "/nodes/%s/trunks" % self.host
        etcd_client: Optional[etcd.Client]
        etcd_helper: Optional[etcdutils.EtcdHelper]
        etcd_client = self.client_factory.client()
        etcd_helper = etcdutils.EtcdHelper(etcd_client)
        # We need certain directories to exist so that we can write to
        # and watch them
        etcd_helper.ensure_dir(self.port_key_space)
        etcd_helper.ensure_dir(self.secgroup_key_space)
        etcd_helper.ensure_dir(self.state_key_space)
        etcd_helper.ensure_dir(self.physnet_key_space)
        etcd_helper.ensure_dir(self.router_key_space)
        etcd_helper.ensure_dir(self.remote_group_key_space)
        etcd_helper.ensure_dir(self.trunk_key_space)
        # Stale per-port state from a previous run is discarded.
        etcd_helper.clear_state(self.state_key_space)
        # py3 note: in py3 keys() does not return a list but the following
        # seems to work fine. Enclose in list() is problems arise.
        physnets = self.physnets.keys()
        # Re-publish the physnets this host supports.
        etcd_helper.clear_state(self.physnet_key_space)
        for f in physnets:
            etcd_client.write(self.physnet_key_space + '/' + f, 1)
        # We need to be wary not to hand the same client to multiple threads;
        # this etcd_helper and client dies here
        etcd_helper = None
        etcd_client = None
        # load sw_if_index to macip acl index mappings
        self.load_macip_acl_mapping()
        # Dedicated thread that writes bind-complete state keys.
        self.binder = BindNotifier(self.client_factory, self.state_key_space)
        self.pool.spawn(self.binder.run)
        if self.secgroup_enabled:
            LOG.debug("loading VppAcl map from acl tags for "
                      "performing secgroup_watcher lookups")
            known_secgroup_ids = self.vppf.populate_secgroup_acl_mappings()
            LOG.debug("Adding ingress/egress spoof filters "
                      "on host for secgroup_watcher spoof blocking")
            self.spoof_filter_on_host()
            LOG.debug("Spawning secgroup_watcher..")
            # Each watcher gets its own etcd client (not thread-shared).
            self.pool.spawn(SecGroupWatcher(self.client_factory.client(),
                                            'secgroup_watcher',
                                            self.secgroup_key_space,
                                            known_secgroup_ids,
                                            heartbeat=self.AGENT_HEARTBEAT,
                                            data=self).watch_forever)
            self.pool.spawn(RemoteGroupWatcher(self.client_factory.client(),
                                               'remote_group_watcher',
                                               self.remote_group_key_space,
                                               heartbeat=self.AGENT_HEARTBEAT,
                                               data=self).watch_forever)
        # The security group watcher will load the secgroups before
        # this point (before the thread is spawned) - that's helpful,
        # because it means that the ports will be immediately createable
        # as the secgroups are already available.
        LOG.debug("Spawning port_watcher")
        self.pool.spawn(PortWatcher(self.client_factory.client(),
                                    'port_watcher',
                                    self.port_key_space,
                                    heartbeat=self.AGENT_HEARTBEAT,
                                    data=self).watch_forever)
        # Spawn trunk watcher if enabled
        if 'vpp-trunk' in cfg.CONF.service_plugins:
            LOG.debug("Spawning trunk_watcher")
            self.pool.spawn(TrunkWatcher(self.client_factory.client(),
                                         'trunk_watcher',
                                         self.trunk_key_space,
                                         heartbeat=TRUNK_WATCHER_HEARTBEAT,
                                         data=self).watch_forever)
        # Spawn GPE watcher for GPE tenant networks
        if self.gpe_listener is not None:
            self.gpe_listener.spawn_watchers(self.pool,
                                             self.AGENT_HEARTBEAT,
                                             self)
        # Spawning after the port bindings are done so that
        # the RouterWatcher doesn't do unnecessary work
        if 'vpp-router' in cfg.CONF.service_plugins:
            if cfg.CONF.ml2_vpp.enable_l3_ha:
                LOG.info("L3 HA is enabled")
            LOG.debug("Spawning router_watcher")
            self.pool.spawn(RouterWatcher(self.client_factory.client(),
                                          'router_watcher',
                                          self.router_key_space,
                                          heartbeat=self.AGENT_HEARTBEAT,
                                          data=self).watch_forever)
        # Block forever servicing the watcher greenthreads.
        self.pool.waitall()
class PortWatcher(etcdutils.EtcdChangeWatcher):
    """Watch this host's port keyspace and (un)bind ports accordingly.

    Keys are port UUIDs; values are JSON binding descriptions written
    by the ML2 mechanism driver.  This watcher also maintains the
    agent's liveness key in etcd (TTL = 3 * heartbeat).
    """

    def __init__(self, *args, **kwargs):
        super(PortWatcher, self).__init__(*args, **kwargs)
        # Create the liveness key up front; do_tick() refreshes it.
        self.etcd_client.write(LEADIN + '/state/%s/alive' %
                               self.data.host,
                               1, ttl=3 * self.heartbeat)

    def do_tick(self):
        # The key that indicates to people that we're alive
        # (not that they care)
        self.etcd_client.refresh(LEADIN + '/state/%s/alive' %
                                 self.data.host,
                                 ttl=3 * self.heartbeat)

    def init_resync_start(self):
        """Identify known ports in VPP
        We are beginning a resync because the agent has
        restarted.  We should be fixing VPP with the least
        disruption possible so that traffic being passed by VPP
        on currently configured ports is not disrupted.  As such,
        this goes to find correctly configured ports (which -
        if still required - will be left alone) and removes
        structures that have been partially or incorrectly set up.
        """
        self.expected_keys = self.data.vpp_restart_prepare()

    def removed(self, port):
        # Removing key == desire to unbind
        # Pre-initialize so the GPE cleanup below can never reference
        # unbound names.
        is_gpe = False
        seg_id = None
        mac = None
        try:
            port_data = self.data.vppf.interfaces[port]
            port_net = port_data['net_data']
            is_gpe = port_net['network_type'] == TYPE_GPE \
                and self.data.gpe_listener is not None
            if is_gpe:
                # Get seg_id and mac to delete any gpe mappings
                seg_id = port_net['segmentation_id']
                mac = port_data['mac']
        except KeyError:
            # On initial resync, this information may not
            # be available; also, the network may not
            # be gpe
            if is_gpe:
                LOG.warning('Unable to delete GPE mappings for port')
                # Bug fix: previously is_gpe stayed True here even
                # though seg_id/mac were never found, so the mapping
                # delete below raised NameError.  Skip it instead.
                is_gpe = False
        self.data.unbind(port)
        # Unlike bindings, unbindings are immediate.
        try:
            self.etcd_client.delete(
                self.data.state_key_space + '/%s'
                % port)
            if is_gpe:
                self.data.gpe_listener.delete_etcd_gpe_remote_mapping(
                    seg_id, mac)
        except etcd.EtcdKeyNotFound:
            # Gone is fine; if we didn't delete it
            # it's no problem
            pass

    def added(self, port, value):
        # Create or update == bind
        # In EtcdListener, bind *ensures correct
        # binding* and is idempotent.  It will also
        # fix up security if the security state has
        # changed.  NB most things will not change on
        # an update.
        data = jsonutils.loads(value)
        # For backward comatibility reasons, 'plugtap' now means 'tap'
        # Post-17.07 'tap' is used, but this allows compatibility with
        # previously stored information in etcd.
        binding_type = data['binding_type']
        if binding_type == 'plugtap':
            binding_type = 'tap'
        self.data.bind(
            self.data.binder.add_notification,
            port,
            binding_type,
            # NB(onong): VM's mac is needed to be programmed as the lisp local
            # eid for data flow in gpe networks across compute nodes so please
            # do not change the line below without proper consideration.
            data['mac_address'],
            data['physnet'],
            data['network_type'],
            data['segmentation_id'],
            data  # TODO(ijw) convert incoming to security fmt
        )
        # While the bind might fail for one reason or another,
        # we have nothing we can do at this point.  We simply
        # decline to notify Nova the port is ready.
        # For GPE networks,
        # write the remote mapping data to etcd to
        # propagate both the mac to underlay mapping and
        # mac to instance's IP (for ARP) mapping to all
        # agents that bind this segment using GPE
        if data['network_type'] == TYPE_GPE \
                and self.data.gpe_listener is not None:
            # NB(onong): The VM's mac needs to be programmed in the remote
            # mappings.  Without this no communication is possible between VMs
            # running on separate compute nodes.
            mac = data['mac_address']
            for ip in [ip['ip_address'] for ip in data.get('fixed_ips')]:
                self.data.gpe_listener.add_etcd_gpe_remote_mapping(
                    data['segmentation_id'], mac, ip)
class RouterWatcher(etcdutils.EtcdChangeWatcher):
    """Start an etcd watcher for router operations.
    Starts an etcd watcher on the /router directory for
    this node. This watcher is responsible for consuming
    Neutron router CRUD operations.
    """

    # TODO(ijw): consider how to remove GPE references from the router
    # code, as they *should* be dealt with by port binding functions.

    def do_tick(self):
        # No periodic work for routers.
        pass

    def parse_key(self, router_key):
        """Parse the key into two tokens and return a tuple.
        The returned tuple is denoted by (token1, token2).
        If token1 == "floatingip", then token2 is the ID of the
        floatingip that is added or removed on the server.
        If, token1 == router_ID and token2 == port_ID of the router
        interface that is added or removed.
        If, token1 == 'ha', then we return that token for router watcher
        to action.

        Returns (None, None) if the key is not of the form "a/b".
        """
        m = re.match('([^/]+)' + '/([^/]+)', router_key)
        floating_ip, router_id, port_id = None, None, None
        if m and m.group(1) and m.group(2):
            if m.group(1) == 'floatingip':
                floating_ip = m.group(2)
                return ('floatingip', floating_ip)
            else:
                router_id = m.group(1)
                port_id = m.group(2)
                return (router_id, port_id)
        else:
            return (None, None)

    def add_remove_gpe_mappings(self, port_id, router_data, is_add=1):
        """Add a GPE mapping to the router's loopback mac-address."""
        # External-gateway ports and plain router interfaces are
        # tracked in separate vppf dicts; pick the right one.
        if router_data.get('external_gateway_info', False):
            loopback_mac = self.data.vppf.router_external_interfaces[
                port_id]['mac_address']
        else:
            loopback_mac = self.data.vppf.router_interfaces[
                port_id]['mac_address']
        # GPE remote mappings are added for only the master L3 router,
        # if ha_enabled
        ha_enabled = cfg.CONF.ml2_vpp.enable_l3_ha
        if is_add:
            if (ha_enabled and self.data.vppf.router_state) or not ha_enabled:
                self.data.gpe_listener.add_etcd_gpe_remote_mapping(
                    router_data['segmentation_id'],
                    loopback_mac,
                    router_data['gateway_ip'])
        else:
            self.data.gpe_listener.delete_etcd_gpe_remote_mapping(
                router_data['segmentation_id'],
                loopback_mac)

    def added(self, router_key, value):
        # Key is either "<router_id>/<port_id>", "floatingip/<fip_id>"
        # or the bare HA state key 'ha' (handled at the bottom).
        token1, token2 = self.parse_key(router_key)
        if token1 and token2:
            if token1 != 'floatingip':
                # Router interface create/update on this host.
                port_id = token2
                router_data = jsonutils.loads(value)
                self.data.vppf.ensure_router_interface_on_host(
                    port_id, router_data)
                # Floating IPs may have been waiting for this interface.
                self.data.vppf.maybe_associate_floating_ips()
                if router_data.get('net_type') == TYPE_GPE:
                    self.add_remove_gpe_mappings(port_id, router_data,
                                                 is_add=1)
            else:
                floating_ip = token2
                floatingip_dict = jsonutils.loads(value)
                self.data.vppf.associate_floatingip(floating_ip,
                                                    floatingip_dict)
        if cfg.CONF.ml2_vpp.enable_l3_ha and router_key == 'ha':
            LOG.debug('Setting VPP-Router HA State..')
            router_state = bool(jsonutils.loads(value))
            LOG.debug('Router state is: %s', router_state)
            # Become master if a state is True, else become backup
            state = 'MASTER' if router_state else 'BACKUP'
            LOG.debug('VPP Router HA state has become: %s', state)
            self.data.vppf.router_state = router_state
            if router_state:
                self.data.vppf.become_master_router()
            else:
                self.data.vppf.become_backup_router()
            # Update remote mappings for GPE bound router ports
            if self.data.gpe_listener:
                self.data.gpe_listener.update_router_gpe_mappings()

    def removed(self, router_key):
        token1, token2 = self.parse_key(router_key)
        if token1 and token2:
            if token1 != 'floatingip':
                port_id = token2
                router_data = self.data.vppf.router_interfaces.get(port_id)
                # Delete the GPE mapping first as we need to lookup the
                # router interface mac-address from vppf
                if router_data and router_data.get('net_type') == TYPE_GPE:
                    self.add_remove_gpe_mappings(port_id, router_data,
                                                 is_add=0)
                self.data.vppf.delete_router_interface_on_host(port_id)
            else:
                floating_ip = token2
                self.data.vppf.disassociate_floatingip(floating_ip)
class SecGroupWatcher(etcdutils.EtcdChangeWatcher):
    """Watch the global secgroup keyspace and mirror it into VPP ACLs.

    known_keys is the set of security-group IDs already realised in
    VPP (recovered from ACL tags at startup); it seeds the resync so
    pre-existing groups aren't re-processed.
    """

    def __init__(self, etcd_client, name, watch_path,
                 known_keys,
                 **kwargs):
        self.known_keys = known_keys
        super(SecGroupWatcher, self).__init__(
            etcd_client, name, watch_path, **kwargs)

    def init_resync_start(self):
        # TODO(ijw): we should probably do the secgroup work
        # here rather than up front
        return self.known_keys

    def do_tick(self):
        # No periodic work required for secgroups.
        pass

    def removed(self, secgroup):
        # A deleted key means the secgroup's VPP ACLs must go too.
        self.data.acl_delete(secgroup)

    def added(self, secgroup, value):
        # Create or update of a secgroup == add/replace its VPP ACL,
        # then revisit any ports that were waiting on this secgroup.
        rules = jsonutils.loads(value)
        self.data.acl_add_replace(secgroup, rules)
        self.data.reconsider_port_secgroups()
class RemoteGroupWatcher(etcdutils.EtcdChangeWatcher):
    """Details on how the remote-group-id rules are updated by the vpp-agent.
    This thread watches the remote-group key space.
    When VM port associations to security groups are updated, this thread
    receives an etcd watch event from the server. From the watch event,
    the thread figures out the set of ports associated with the
    remote-group-id and the IP addresses of each port.
    After this, this thread updates two data structures.
    The first one is a dictionary named port_ips, used to keep track of
    the ports to their list of IP addresses. It has the port UUID as the key,
    and the value is it's set of IP addresses. The second DS is a dict named
    remote_group_ports. This is used to keep track of port memberships in
    remote-groups. The key is the remote_group_id and the value is the set of
    ports associated with it. These two dictionaries are updated by the thread
    whenever watch events are received, so the agent always has up to date
    information on ports, their IPs and the remote-groups association.
    The RemoteGroupWatcher thread then calls a method named
    update_remote_group_secgroups with the remote_group_id as the argument.
    This method figures out which secgroups need to be updated as a result of
    the watch event. This is done by looking up another dict named
    remote_group_secgroups that keeps track of all the secgroups that are
    referencing the remote-group-id inside their rules.
    The key is the remote-group, and the value is the set of secgroups that
    are dependent on it.
    The update_remote_group_secgroups method then reads the rules for each of
    these referencing security-groups and sends it to the method named
    acl_add_replace with the security-group-uuid and rules as the argument.The
    acl_add_replace method takes each rule that contains the remote-group-id
    and computes a product using the list of IP addresses belonging to all
    the ports in the remote-group. It then calls the acl_add_replace method
    in vppf to atomically update the relevant VPP ACLs for the security-group.
    """

    def do_tick(self):
        # No periodic work for remote groups.
        pass

    def parse_key(self, remote_group_key):
        """Split a "remote_group_id/port_id" key into its two parts.

        Returns (remote_group_id, port_id), or (None, None) when the
        key does not match that shape.
        """
        m = re.match('([^/]+)' + '/([^/]+)', remote_group_key)
        remote_group_id, port_id = None, None
        if m:
            remote_group_id = m.group(1)
            port_id = m.group(2)
        return (remote_group_id, port_id)

    def added(self, remote_group_key, value):
        # remote_group_key format is "remote_group_id/port_id"
        # Value is a list of IP addresses
        remote_group_id, port_id = self.parse_key(remote_group_key)
        if value and remote_group_id and port_id:
            ip_addrs = jsonutils.loads(value)
            # The set of IP addresses configured on a port
            self.data.vppf.port_ips[port_id] = set(ip_addrs)
            # The set of ports in a security-group
            self.data.vppf.remote_group_ports[remote_group_id].update(
                [port_id])
            LOG.debug("Current remote_group_ports: %s port_ips: %s",
                      self.data.vppf.remote_group_ports,
                      self.data.vppf.port_ips)
            # Re-expand every secgroup that references this remote-group.
            self.data.update_remote_group_secgroups(remote_group_id)

    def removed(self, remote_group_key):
        remote_group_id, port_id = self.parse_key(remote_group_key)
        if remote_group_id and port_id:
            # Remove the port_id from the remote_group
            self.data.vppf.remote_group_ports[
                remote_group_id].difference_update([port_id])
            LOG.debug("Current remote_group_ports: %s port_ips: %s",
                      self.data.vppf.remote_group_ports,
                      self.data.vppf.port_ips)
            # Re-expand every secgroup that references this remote-group.
            self.data.update_remote_group_secgroups(remote_group_id)
class TrunkWatcher(etcdutils.EtcdChangeWatcher):
    """Watches trunk parent/subport bindings on the host and takes actions.
    Trunk keyspace format.
    /networking-vpp/nodes/<node-name>/trunks/<UUID of the trunk>
    Sample data format:
    {"status": "ACTIVE",
     "name": "trunk-new",
     "admin_state_up": true,
     "sub_ports": [
        {"segmentation_id": 11,
         "uplink_seg_id": 149,
         "segmentation_type": "vlan",
         "uplink_seg_type": "vlan",
         "port_id": "9ee91c37-9150-49ff-9ea7-48e98547771a",
         "physnet": "physnet1",
         "allowed_address_pairs": [],
         "mac_address": "fa:16:3e:c4:80:dc",
         "port_security_enabled": true,
         "fixed_ips": [{"subnet_id": "05cfd12c-9db8-4f55-a2b9-aca89f412932",
                        "ip_address": "10.110.110.7"}],
         "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"]},
        {"segmentation_id": 12,
         "uplink_seg_id": 139,
         "segmentation_type": "vlan",
         "uplink_seg_type": "vlan",
         "port_id": "2b1a89ba-78f1-4350-b71a-7caf7f23cbcf",
         "physnet": "physnet1",
         "allowed_address_pairs": [],
         "mac_address": "fa:17:3e:c4:80:de",
         "port_security_enabled": true,
         "fixed_ips": [{"subnet_id": "05cfd12c-9db8-4f55-a2b9-aca89f412932",
                        "ip_address": "10.110.110.8"}],
         "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"]},
        ]}
    How does it work?
    The ml2 server:
    1) Writes above etcd key/value when a trunk port is bound on the host.
    2) Updates the above value when subports on a bound trunk are updated.
    3) Deletes the key when the trunk is unbound.
    The trunkwatcher receives the watch event and it figures out whether
    it should perform a bind or unbind action on the parent and its subport
    and performs it.
    """

    def do_tick(self):
        """Invoked every TRUNK_WATCHER_HEARTBEAT secs"""
        # Check if there are child ports to be bound and brought UP
        self.data.reconsider_trunk_subports()

    def added(self, parent_port, value):
        """Bind and unbind sub-ports of the parent port."""
        data = jsonutils.loads(value)
        LOG.debug('trunk watcher received add for parent_port %s '
                  'with data %s', parent_port, data)
        # Due to out-of-sequence etcd watch events during an agent restart,
        # we do not yet know at this point whether the parent port is setup.
        # So, we'll add it to the awaiting parents queue and reconsider it.
        subport_data = data['sub_ports']
        for subport in subport_data:
            # Subports have no Nova notification to send on bind, so a
            # no-op callback stands in for the bound_callback slot.
            subport['bound_callback'] = lambda *args: None
        self.data.subports_awaiting_parents[parent_port] = subport_data
        # reconsider awaiting sub_ports
        self.data.reconsider_trunk_subports()

    def removed(self, parent_port):
        """Unbind all sub-ports and then unbind the parent port."""
        LOG.debug('trunk watcher received unbound for parent port %s ',
                  parent_port)
        # First, unbind all subports
        self.data.bind_unbind_subports(parent_port, subports=[])
        # Then, unbind the parent port if it has no subports
        if not self.data.bound_subports[parent_port]:
            LOG.debug('Unbinding the parent port %s', parent_port)
            self.data.vppf.unbind_interface_on_host(parent_port)
class BindNotifier(object):
    """A thread to return bind-complete notifications to the server.

    This notifies the completion of a bind by writing a state key with
    the details of VPP's config (the other end doesn't care about the
    content, only the key's presence, so this is purely a debugging
    issue) to etcd.
    """

    def __init__(self, client_factory, state_key_space):
        """Set up the notifier.

        Arguments:
        client_factory - factory used to make this thread's private
            etcd client
        state_key_space - etcd directory under which per-port state
            keys are written
        """
        # An infinite queue over which we receive notifications
        self.notifications = eventlet.queue.Queue()

        self.state_key_space = state_key_space

        self.etcd_client = client_factory.client()
        self.etcd_writer = etcdutils.json_writer(self.etcd_client)

    def add_notification(self, id, content):
        """Queue a notification for sending to Nova

        Nova watches a key's existence before sending out bind events.
        We set the key, and use the value to store debugging
        information.
        """
        self.notifications.put((id, content,))

    def run(self):
        """Consume the notification queue forever, writing state keys."""
        while True:
            # Block until work arrives.  The get() is outside the try
            # so that the except clause below never references unbound
            # names (previously 'port'/'props' could be undefined if
            # the failure happened before unpacking, raising NameError).
            ent = self.notifications.get()
            try:
                (port, props) = ent

                # TODO(ijw): do we ever clean this space up?
                self.etcd_writer.write(
                    self.state_key_space + '/%s' % port,
                    props)
            except Exception:
                # We must keep running, but we don't expect problems
                LOG.exception("exception in bind-notify thread")
                # If there are problems, retry the notification later.
                # There's no issue if we do this multiple times.
                self.notifications.put(ent)
class VPPRestart(object):
    """Restart the VPP service and allow time for it to come back."""

    def __init__(self):
        # Seconds we allow for VPP to accept connections again.
        self.timeout = 10  # VPP connect timeout in seconds
        LOG.debug("Agent is restarting VPP")
        utils.execute(['service', 'vpp', 'restart'], run_as_root=True)

    def wait(self):
        """Sleep out the restart window before talking to VPP."""
        time.sleep(self.timeout)  # TODO(najoy): check if vpp is actually up
def openstack_base_setup(process_name):
    """General purpose entrypoint

    Sets up non-specific bits (the integration with OpenStack and its
    config, and so on).

    Arguments:
    process_name - the name under which this process registers its
        logging configuration
    """
    # Arguments, config files and options
    cfg.CONF(sys.argv[1:])

    # General logging
    logging.setup(cfg.CONF, process_name)

    # Guru meditation support enabled: lets operators extract a state
    # report from a running agent.
    gmr_opts.set_defaults(cfg.CONF)
    gmr.TextGuruMeditation.setup_autorun(
        version.version_info,
        service_name='vpp-agent')
def _parse_physnets(physnet_config):
    """Parse the ml2_vpp 'physnets' option into {physnet: interface}.

    The option is a comma-separated list of 'physnet:interface'
    pairs; whitespace is stripped and empty entries are skipped.
    Exits the process (preserving previous behaviour) on a malformed
    entry or an interface name too long to fit in a VPP tag.

    :param physnet_config: raw config string, e.g.
        'physnet1:eth1, physnet2:eth2'
    :returns: dict mapping physnet name to interface name
    """
    physnet_list = physnet_config.replace(' ', '').split(',')
    physnets = {}
    for f in physnet_list:
        if not f:
            continue
        try:
            (k, v) = f.split(':')
        except Exception:
            LOG.error("Could not parse physnet to interface mapping "
                      "check the format in the config file: "
                      "physnets = physnet1:<interface1>, "
                      "physnet2:<interface>")
            sys.exit(1)
        if len(v) > MAX_PHYSNET_LENGTH:
            # NOTE(review): this checks the *interface* name (v) but
            # reports it as the physnet name; confirm whether the
            # physnet name (k) was intended for the length limit.
            LOG.error("Physnet '%(physnet_name)s' is longer than "
                      "%(len)d characters.",
                      {'physnet_name': v, 'len': MAX_PHYSNET_LENGTH})
            sys.exit(1)
        physnets[k] = v
    return physnets


def main():
    """Main function for VPP agent functionality.

    Reads and validates config, (optionally) restarts VPP, builds the
    VPP forwarder and the etcd listener, starts any configured agent
    extensions and then runs the listener's processing loop forever.
    """
    openstack_base_setup('vpp_agent')
    setup_privsep()

    neutron.conf.plugins.ml2.config.register_ml2_plugin_opts(cfg.CONF)
    neutron.conf.agent.securitygroups_rpc.register_securitygroups_opts(
        cfg.CONF)
    config_opts.register_vpp_opts(cfg.CONF)

    # Pull physnets out of config and interpret them
    if not cfg.CONF.ml2_vpp.physnets:
        LOG.critical("Missing physnets config. Exiting...")
        sys.exit(1)

    physnets = _parse_physnets(cfg.CONF.ml2_vpp.physnets)

    # Deal with VPP-side setup
    if cfg.CONF.ml2_vpp.enable_vpp_restart:
        VPPRestart().wait()

    # Convert to the minutes unit that VPP uses:
    # (we round *up*)
    # py3 note: using // since we want integer division
    mac_age_min = int((cfg.CONF.ml2_vpp.mac_age + 59) // 60)
    vppf = VPPForwarder(physnets,
                        mac_age=mac_age_min,
                        vpp_cmd_queue_len=cfg.CONF.ml2_vpp.vpp_cmd_queue_len
                        )

    # Deal with etcd-side setup (password deliberately not logged)
    LOG.debug("Using etcd host:%s port:%s user:%s password:***",
              cfg.CONF.ml2_vpp.etcd_host,
              cfg.CONF.ml2_vpp.etcd_port,
              cfg.CONF.ml2_vpp.etcd_user)

    client_factory = etcdutils.EtcdClientFactory(cfg.CONF.ml2_vpp)

    # Do the work
    ops = EtcdListener(cfg.CONF.host, client_factory, vppf, physnets)

    # Load and start any configured vpp-agent extensions before we
    # enter the main processing loop.
    names = cfg.CONF.ml2_vpp.vpp_agent_extensions
    if names != '':
        mgr = ExtensionManager(
            'networking_vpp.vpp_agent.extensions',
            names,
            VPPAgentExtensionBase)

        mgr.call_all('run', cfg.CONF.host, client_factory, vppf, ops.pool)

    # Blocks forever, servicing etcd-driven requests.
    ops.process_ops()
# Script entry point: run the VPP agent when executed directly.
if __name__ == '__main__':
    main()
|
en
| 0.867024
|
# Copyright (c) 2017 Cisco Systems, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Note that it does *NOT* at this point have a persistent database, so # restarting this process will make net-vpp forget about every port it's # learned, which will not do your system much good (the data is in the # global 'backends' and 'ports' objects). This is for simplicity of # demonstration; we have a second codebase already defined that is # written to OpenStack endpoint principles and includes its ORM, so # that work was not repeated here where the aim was to get the APIs # worked out. The two codebases will merge in the future. # eventlet must be monkey patched early or we confuse urllib3. # noqa # A model of a bi-directional VPP ACL corresponding to a secgroup # TODO(najoy) Expose the below as a config option # Enable stateful reflexive ACLs in VPP which adds automatic reverse rules # When False, reverse rules are added by the vpp-agent and # VPP does not maintain any session states # We use eventlet for everything but threads. Here, we need an eventlet-based # locking mechanism, so we call out eventlet specifically rather than using # threading.Semaphore. 
# # Our own, strictly eventlet, locking: Root helper configured for privilege separation Use root helper (if present) to execute privileged commands ###################################################################### # This mirrors functionality in Neutron so that we're creating a name # that Neutron can find for its agents. ###################################################################### # TODO(ijw): should be pulled from Neutron or Nova - this naming # scheme is common between both # type: (str) -> str # type: (str) -> str # This is our internal name and the other end neither knows or cares about # it, only the bridge we put it in # type: (str) -> str ###################################################################### # Interface tagging naming scheme : # tap and vhost interfaces: port:<uuid> # Uplink Connectivity: uplink:<net_type>.<seg_id> # MAX_PHYSNET_LENGTH + the tag format must be <= the 64 bytes of a VPP tag Spot an uplink interface tag. Return (net_type, seg_id) or None if not an uplink tag # not tagged Spot a port interface tag Return uuid or None if not a port interface tag. # not tagged ###################################################################### # Security group tag formats used to tag ACLs in VPP for # re-identification on restart # When leaving VPP and entering the VM # When leaving the VM and entering VPP Work out if this tag is one of our common spoof filter tags # Matches the formats constructed earlier Used when ML2 has tried to ask for a weird binding type. Convert agent requirements into VPP calls This class has no interaction with etcd; other classes have no interaction with VPP. The job of this class is to turn the demands of etcd's data into VPP constructs. 
# physnet_name: interface-name # a Mapping of security groups to VPP ACLs # secgroup_uuid: VppAcl(ingress_idx, egress_idx) # Security group UUID to the set of associated port UUIDs # Port UUID to its set of IP addresses # Remote-group UUID to the set to security-groups that uses it # ACLs we ought to delete # Enable the GPE forwarder programming, if required # uuid: if idx # router_port_uuid: {} # router external interfaces # floating_ip_uuid: {} # Router BVI (loopback) interface states for L3-HA # {idx: state} 1 = UP, 0 = DOWN # VPP Router state variable is updated by the RouterWatcher # The default router state is the BACKUP. # If this node should be the master it will be told soon enough, # and this will prevent us from having two masters on any restart. # True = Master; False = Backup # mac_ip acls do not support atomic replacement. # Here we create a mapping of sw_if_index to VPP ACL indices # so we can easily lookup the ACLs associated with the interface idx # sw_if_index: {"l34": [l34_acl_indxs], "l23": l23_acl_index } # key: OpenStack port UUID; present when vhost-user is # connected and removed when we delete things. May accumulate # any other VPP interfaces too, but that's harmless. # Thread to drain the queues for binding tap interfaces into Linux # bridges # External devices detected by the device monitor # Device monitor to ensure the tap interfaces are plugged into the # right Linux bridge # The worker will be in endless loop, so don't care the return value # Start Vhostsocket filemonitor to bind sockets as soon as they appear. # Register to handle ON_CREATE event. # Register to handle ON_DELETE event. # We are expecting the port unbinding call flow to clean up vhost # sockets, hence ignoring delete events on vhost file handle. # Finally start the file monitor. 
######################################## # Port resyncing on restart Fix or remove networks where uplinks have changed in config - fixes uplink interfaces from VPP where they've changed in config or where the config didn't fully get pushed to VPPFowarder - deletes interfaces and networks from VPP where the the physical network is no longer configured - evicts ports from bridges with no network # One uplink per network # One physnet can serve multiple uplinks # Find uplink ports on OpenStack networks # Find physical network ports # Find physnets we intend according to the config # Can be 'None', that's fine as it won't match anything later # Delete networks with a physnet whose config changed # This will remove ports from bridges, which means # that they may be rebound back into networks later # or may be deleted if no longer used. # This configuration has changed. # Untag the original physnet interface, which is no # longer used as a physnet # In case there was a flat network, make sure the flat # network bridge no longer exists # The remaining networks (with uplinks and bridge domains) are # functional, and idempotent binding will do nothing to # interfaces in the right bridges. It will fix those in the # wrong bridges. # Dead bridges have been deleted and binding # will find a new home for the interfaces that still exist. Assuming no local data, find bound ports in VPP This analyses the tags to identify ports in VPP that have been bound by this process before it restarted. # Find downlink ports ######################################## Find vhostuser connections with an attached VM The moment of VM attachment is useful, as it's one of the preconditions for notifying Nova a socket is ready. Watching the vhostuser data inside VPP has a performance impact on forwarding, so instead we watch the kernel's idea of which vhostuser connections are properly opened. 
Having two open sockets is 99% ready - technically, the interface is ready when VPP has mapped its memory, but these two events are nearly contemporaenous, so this is good enough. # We need dirname to have precisely one trailing slash. # Track unix sockets in vhost directory that are opened more # than once # Problems with fnames with spaces in, though # Report on any sockets that are open exactly twice (VPP + KVM) # (note list clone so that we can delete entries) # .. we don't have to notify the port drops, that's fine # Update this *before* making callbacks so that this register is up # to date ######################################## Proxy for VPP's ifup. ######################################## Find (and mark used) the interface for a physnet Delete a bridge corresponding to a network from VPP Usable on restart - uses nothing but the data in VPP. # If there are ports still in this network, disable them # They may be deleted later (if at startup) or they may # be rebound to another bridge domain # When this bridge domain is for an OpenStack flat network, the # uplink interface may be a physical interface, i.e. not VLAN-based # sub-interfaces. In this case, we will not bring down the uplink # interface, and always leave it UP. # At startup, this is downing the interfaces in a bridge that # is no longer required. However, in free running, this # should never find interfaces at all - they should all have # been unbound before the deletion. (If it does find them, # the removal of interfaces is probably the best thing we can # do, but they may not stay down if it races with the binding # code.) # The physnet is gone so no point in keeping the vlan sub-interface # TODO(onong): VxLAN ######################################## # stolen from LB driver Check if the bridge exists and make sure it is up. # If the device doesn't exist this will throw a RuntimeError Create a bridge unless it already exists. 
# _bridge_exists_and_ensure_up instead of device_exists is used here # because there are cases where the bridge exists but it's not UP, # for example: # 1) A greenthread was executing this function and had not yet executed # "ip link set bridge_name up" before eventlet switched to this # thread running the same function # 2) The Nova VIF driver was running concurrently and had just created # the bridge, but had not yet put it UP # TODO(ijw): should be checking this all succeeded # end theft ######################################## See if we need to take action when a net device is created This function will be called as a callback when a new interface is created in Linux kernel. We will filter for tap interfaces created by OpenStack, and those will be added to the bridges that we create on the Neutron side of things. # TODO(ijw) will act upon other mechanism drivers' taps # Add the detected external device to be handled by the port-watcher Ensure detected external tap devices are added to the bridge. All detected external devices are queued in the external_devices data set. So handle it in this method to ensure that these are added to the bridge. Add a TAP device to a Linux kernel bridge Defend against this having been done already (common on restart) and this missing a requirement (common when plugging external tap interfaces). # External TAP interfaces created by DHCP or L3 agent will be # added to corresponding Linux Bridge by vpp-agent to talk to # VPP. During a regular port binding process, there are two # code paths calling this function for adding the interface to # the Linux Bridge, which may potentially cause a race # condition and a non-harmful traceback in the log. Also, it # is quite possible that a bridge may have been deleted by the # normal port unbinding process before this code tries to add # the tap interafce. # The fix will eliminate the non-harmful traceback in the log. 
# This is the kernel-side config (and we should not assume # that, just because the interface exists in VPP, it has # been done previously - the crash could occur in the # middle of the process) # Running it twice is harmless. Never running it is # problematic. # TODO(ijw): someone somewhere ought to be sorting # the MTUs out # This is the device that we just created with VPP # This is the external TAP device that will be # created by Nova or an agent, say the DHCP agent, # later in time. # This is called by the (eventlet) inotify functions and the (eventlet) # etcd functionality, and thus needs an eventlet-based lock. We've found # oslo_concurrency thinks that, because threading is unpatched, a threading # lock is required, but this ends badly. Create or update vpp interface on host based on if_type. Depending on the if_type (maketap, plugtap or vhostuser) call vpp papi to do vpp side of the plumbing. This will change depending on the if_type. The interfaces are tagged saved in the internal dict for easy retrieval. The call is idempotent if the uuid and its associated interface is already present. :return: dict indexed on uuid # It's definitely there, we made it ourselves # TODO(ijw): it may exist, but we may need to create it # - and what exists may be wrong so we may have to # recreate it # TODO(ijw): idempotency # Deal with the naming conventions of interfaces # TODO(ijw): naming not obviously consistent with # Neutron's naming # TODO(ijw) structured type # NB(onong): In case the if_type is vhostuser then this is the # neutron port's/VM's mac and it has implications for gpe networks # so please be mindful before altering this # The agent has at some point reset, but before the reset # this interface was at least created. A previous sweep # will have ensured it's the right sort of interface. # Make an interface, and tag it for refinding. # Plugtap interfaces belong in a kernel bridge, and we need # to monitor for the other side attaching. 
Ensure vpp interface for imminent vhost socket binding. Somebody has dropped a file in the vhost_socket_directory which matched our watch pattern (Neutron port uuid). We are expecting an imminent vhost socket binding (from presumably Nova), so lets get ahead of the curve and create a vhost socket for it. Inteface name is the vhost socket file name and since we don't know the mac, let vhost interface create function make one. Idempotently ensure that a bridge contains an interface The interface must exist, but we ensure the bridge exists and that the interface is in it # Adding an interface to a bridge does nothing if it's # already in there, and moves it if it's in another Configure the interface in VPP per the binding request. Because we may be restarting the agent on a VPP that is already running, do this defensively: interfaces that we do not know about may have had some of their binding done. Acting in this way, we can be sure that the interface is now correctly bound regardless of what may have transpired previously. This may be called at any time because of a request from the mechanism driver, or it may be called during resync when state already exists in VPP but in either case we fix what we find and draw out from that a picture of the current state, including whether (in the case of vhostuser interfaces) the far end of the socket has attached to VPP. # In order, we create the network bridge, the interface for # the far end, and we add it to the bridge. Any of these # may have been done before; the functions we call correct # any previous state they find. # Returning None allows us to deal with the uplink # side of a failed binding in the caller. # For resyncs, the port exists but it's not in a bridge domain # and is down, which is the best we can offer. # Ensure local mac to VNI mapping for GPE Detach an interface, clean up structures This removes and destroys the interface and the network if it is no longer used. 
This is *not* used in rebinding, as this requires the data we stored about an interface when it was bound. # Delete the port ip address from remote_group_id list # Check if this is the last interface on host, safe if this # interface is incompletely bound # safe if the other interface is not bound # Network is not used on this host, delete it Bind the subport of a bound parent vhostuser port. # We ensure parent port binding before calling this method. # parent vhostuser intf # Ensure that the uplink and the BD's are setup # Ensure an uplink for the subport # Use the uplink physnet, uplink_seg_id & seg_type # fetch if the subport interface already in vpp # It's already there and we created it # Ensure that the recovered subport is in vpp bridge # create subport vhostuser intf and ensure it's in vpp bridge # set subport tag, so we can find it during resyncs # Add subport props to interfaces along with parent port uuid Unbind the vhostuser subport in VPP. # Don't unbind a trunk port with subports # We no longer need this interface. Specifically if it's # a vhostuser interface it's annoying to have it around # because the VM's memory (hugepages) will not be # released. So, here, we destroy it. # GPE code in VPP does not clean up its data structures # properly if the port # is deleted from the bridge without first removing the # local GPE eid mapping. So remove local mapping, # if we are bound using GPE # Delete port from vpp_acl map if present # remove port from bridge (sets to l3 mode) prior to deletion # If it is a subport of a trunk port then delete the corresponding # vlan sub-interface. Otherwise it is a parent port or a normal # vhostuser port and we delete the vhostuser interface itself. # This interface is no longer connected if it's deleted # RACE, as we may call unbind BEFORE the vhost user # interface is notified as connected to qemu # remove port from bridge (sets to l3 mode) prior to deletion # For us, we expect failing commands and want them ignored. 
# These may fail, don't care much # TODO(ijw) this *really* needs typing with the return value structure. Convert a SecurityGroupRule to VPP ACL rule. Arguments: r - SecurityGroupRule NamedTuple Object SecurityGroupRule = namedtuple( 'SecurityGroupRule', ['is_ipv6', 'remote_ip_addr', 'ip_prefix_len', 'protocol', 'port_min', 'port_max']) TODO(ijw): is_ipv6 appears to be the v6-ness of remote_ip_addr d - Direction: 0 ==> ingress, 1 ==> egress Default == 2 Return: VPP-formatted ACL Rule # a - Permit-Action: 1 == permit, 2 == reflexive; # Enable reflexive ACLs for all TCP/UDP and IP traffic # Disable reflexive for other traffic such as ICMP etc. # for ingress: secgroup remote_ip == Source IP # for egress: secgroup remote_ip == Destination IP # Port ranges are always destination port ranges for TCP/UDP # Set source port range to permit all ranges from 0 to 65535 # OpenStack may provide an interface with subnet (i.e. an # address *on* a network and not an address *of* the # network). VPP requires the network address. # OpenStack may provide an interface with subnet (i.e. an # address *on* a network and not an address *of* the # network). VPP requires the network address. 
# Handle ICMP/ICMPv6 # All ICMP Types and Codes [0-255] # All ICMP codes for an ICMP Type # port_min == ICMP Type and port_max == ICMP Code # Handle TCP/UDP protocols # Allow all ranges for source ports # Handle all protocols - All IPv4 and IPv6 TCP/UDP traffic # Reverse rules are only added if reflexive_acls is set to False Compose and return a reverse rule for r if reflexive_acls is False Arguments: r - rule dictionary returned by the _to_acl_rule(r) method above swap src and dst IP and port ranges to match return traffic for r # 1 == Permit rule and 0 == deny rule # All TCP/UDP IPv4 and IPv6 traffic # Swap port range values Adds/Replaces the secgroup ACL within VPP Arguments: secgroup - SecurityGroup NamedTuple object namedtuple('SecurityGroup', ['id', 'ingress_rules', 'egress_rules']) # Default action == ADD if the acl indexes are set to ~0 # VPP ACL indexes correspond to ingress and egress security # group rules # If not reflexive_acls create return rules for ingress and egress # IPv4/IPv6 tcp/udp traffic # Exclude ICMP # If this is on the pending delete list it shouldn't be now Deletes the ingress and egress VPP ACLs on host for secgroup This may delete up front or it may defer (and delete when it's next called, which is adequately fast) if there's a port using the ACL. Arguments: secgroup - OpenStack security group ID # Attempt both the current ACL and any more ACLs that have been # previously deferred: # This security group doesn't exist, don't add to the # deferred list # Discard the security group from the remote group dict # We could defer this again but it's probably better # we move on. Orphaned ACLs are not the end of the world. From vpp acl dump, populate the secgroups to VppACL mapping. 
Get a dump of existing vpp ACLs that are tagged, by tag Decode tag info populate secgroups data structure relating UUID of secgroup to ACL self.secgroups = {secgroup_id : VppAcl(in_idx, out_idx)} # Clear existing secgroups to ACL map for sanity # Example of the acl_map data # acl_map: {'net-vpp.secgroup:<uuid>.from-vpp' : acl_idx # 'net-vpp.secgroup:<uuid>.to-vpp' : acl_idx, # 'net-vpp.common_spoof.from-vpp': acl_idx } # Tags can be one of ours, or one something else set # decode_* functions attempt to match the tags to one of our # formats, and returns None if that's not a format it matches. # Check if this is one of our common spoof ACL tag # If so, get the tag direction and set the secgroup_id to # COMMON_SPOOF_TAG so the correct spoof ACL can be read # But it is a valid spoof tag # one of our valid secgroup ACL tag # This is neither a security group or a spoof # - so it's not installed by the mechdriver at all # secgroup_id will be missing first pass, and should be # completed on the second round through. # py3 note: in py3 keys() does not return a list but the following # seems to work fine. Enclose in list() is problems arise. Read VPP ACL tag data, construct and return an acl_map based on tag acl_map: {secgroup_tag : acl_idx} # TODO(ijw): identify that this is one of our tags # Not all ACLs have tags, but ACLs we own will # have them and they will be decodeable. Ignore # any externally created ACLs, they're not our problem. Compute a vector of input/output ACLs and set it on the VPP port. Arguments: secgroup_ids - OpenStack Security Group IDs sw_if_index - VPP software interface index on which the ACLs will be set This method checks the global secgroups to acl mapping to figure out the ACL indexes associated with the secgroup. It then composes the acl vector and programs the port using vppf. If the secgroup cannot be found or if the ACL index is invalid i.e. 0xffffffff it will return False. 
This happens mostly in agent restart situations when the secgroups mapping is still being populated by the secgroup watcher thread, but since the port and secgroup threads are independent it can happen at any moment. # A list of VppAcl namedtuples to be set on the port # If any one or both indices are invalid wait for a valid acl Build a vector of VPP ACLs and set it on the port Arguments - vpp_acls - a list of VppAcl(in_idx, out_idx) namedtuples to be set on the interface. An empty list '[]' deletes all user defined acls from the interface and retains only the spoof ACL # Initialize lists with anti-spoofing vpp acl indices # input acl on vpp filters egress traffic from vm and viceversa # in on vpp == out on vm # out on vpp == in on vm # Build the vpp ACL vector # (najoy) At this point we just keep a mapping of acl vectors # associated with a port and do not check for any repeat application. Set the mac-filter on VPP port Arguments: mac_ips - A list of tuples of (mac_address, ip_address) sw_if_index - Software index ID of the VPP port Pack a mac_address into binary. # ip can be an address (or) a network/prefix # TODO(ijw): is it ever an interface rather than a network address? # This is the struct the VPP API accepts: note the packed address # get the current mac_ip_acl on the port if_any # There may not be an ACL on the interface # Delete the previous macip ACL from VPP Removes all security group ACLs on the vpp port Arguments:- sw_if_index - Software index of the port on which ACLs are to be removed # We should know about the existing ACLS on port by looking up # port_vpp_acls. If there is a KeyError, we do not know about any # ACLs on that port. So ignore Removes all MAC/IP ACLs on the vpp port These ACLs correspond to anti-spoof and allowed-address-pair. Arguments:- sw_if_index - Software index of the port on which ACLs are to be removed Adds a spoof filter ACL on host if not already present. A spoof filter is identified by a common spoof tag mark. 
If not present create the filter on VPP, If it is present, replace it for good measure to ensure that the correct anti-spoof rules are always applied. Return: VppAcl(in_idx, out_idx) # Check if we have an existing spoof filter deployed on vpp # Get the current anti-spoof filter rules. If a spoof filter is # present replace rules for good measure, else create a new # spoof filter # Add the new spoof ACL to secgroups mapping if it is valid Pack an IPv4 or IPv6 (ip_addr or ip_network) into binary. If the argument is an ip_address, it is packed and if the argument is an ip_network only the network portion of it is packed Arguments:- ip_addr: an IPv4 or IPv6 address without a prefix_length e.g. 1.1.1.1 (or) an IPv4 or IPv6 network with prefix_length e.g. 1.1.1.0/24 # Works for both addresses and the net address of masked networks Return the internal and external interface indices for SNAT. Ensure the internal n/w, external n/w and their corresponding BVI loopback interfaces are present, before returning their index values. # Get internal network details. # Get the external network details # Return the internal and external BVI loopback intf indxs. Check if the external subinterface can be deleted. It can be deleted if it still exists and has no more addresses. # Create a VLAN subif Ensure a BVI loopback interface for the bridge. Ensure a router interface on the local host. Creates a loopback interface and sets the bridge's BVI to the loopback interface to act as an L3 gateway for the network. For external networks, the BVI functions as an SNAT external interface. For updating an interface, the service plugin removes the old interface and then adds the new router interface. If an external gateway exists, ensures a local route in VPP. When Layer3 HA is enabled, the router interfaces are only enabled on the active VPP router. The standby router keeps the interface in an admin down state. 
# The interface could be either an external_gw or an internal router # interface on a subnet # Enable SNAT by default unless it is set to False # Multiple routers on a shared external subnet is supported # by adding local routes in VPP. # True for local-only VPP routes. # Create an external interfce if the external_gateway_info key is # present, else create an internal interface # To support multiple IP addresses on a router port, add # the router to each of the subnets. # Ensure the network exists on host and get the network data # Get the bridge domain id and ensure a BVI interface for it # Ensure a BVI (i.e. A loopback) for the bridge domain # Create a loopback BVI interface # Create the loopback interface, but don't bring it UP yet # Set the VRF for tenant BVI interfaces, if not already set # Make a best effort to set the MTU on the interface # Log error and continue, do not exit here # Now bring up the loopback interface, if this router is the # ACTIVE router. Also populate the data structure # router_interface_states so the HA code can activate and # deactivate the interface # Set SNAT on the interface if SNAT is enabled # Get a list of all SNAT interfaces # Set the SNAT 1:N overload on the external loopback interface # Add GPE mappings for GPE type networks only on the master # node, if ha_enabled # Set the gateway IP address on the BVI interface, if not already set # Is there another gateway ip_addr set on this external loopback? # Any address other than the one we're thinking of? # This address is not yet present? # Add a local VRF route if another external gateway exists # Add a local IP route if it doesn't exist # Ensure that all gateway networks are exported into this # tenant VRF & # A default route exists in this VRF to the external gateway # TODO(onong): # The current VPP NAT implementation supports only one outside # FIB table and by default it uses table 0, ie, the default vrf. # So, this is a temporary workaround to tide over the limitation. 
# Ensure that the gateway network is exported into all tenant # VRFs, with the correct default routes This node will become the master router # TODO(najoy): Bring up intf. only if not set to admin DOWN This node will become the backup router Returns the IP network for the gateway in CIDR form. # ensure that default route in default VRF is present Exports the external gateway into the tenant VRF. The gateway network has to be exported into the tenant VRF for it to communicate with the outside world. Also a default route has to be set to the external gateway IP address. If source_vrf (i.e tenant VRF) is provided, - Export the external gateway's IP from VRF=0 into this VRF. - Add a default route to the external_gateway in this VRF Else, - Export the external gateway into into all tenant VRFs - Add a default route to the external_gateway in all tenant VRFs If the external gateway IP address is not provided: All external networks are exported into tenant VRFs # TODO(najoy): Check if the tenant ID matches for the gateway router # external interface and export only matching external routes. # If a source vrf is present only update if the VRF matches # Get the external and internal networks in the CIDR form # Add the default route (0.0.0.0/0) to the # external gateway IP addr, which is outside of VPP # with the next hop sw_if_index set to the external # loopback BVI address. # Note: The external loopback sw_if_index and the # next_hop_address is mandatory here to prevent a VPP # crash - Similar to the CLI command # ip route add table <int-vrf> 0.0.0.0/0 via <next-hop-ip> # <next-hop-sw-indx> # # Note(onong): Do not set IPv6 default gateway to an IPv4 # external gateway # Export the external gateway subnet into the tenant VRF # to enable tenant traffic to flow out. Exporting is done # by setting the next hop sw if index to the loopback's # sw_index (i.e. 
BVI) on the external network # CLI: ip route add table <int_vrf> <external-subnet> # via <next-hop-sw-indx> # # Note(onong): Do not export an IPv4 external network # into an IPv6 VRF. # Export the tenant network into external VRF so the # gateway can route return traffic to the tenant VM from # the Internet. # CLI: ip route add table 0 <tenant-subnet> via # <tenant-loopback-bvi> # # Note(onong): Do not export an IPv4 internal network # into an IPv6 external VRF. # Delete the exported route in tenant VRF # Delete the exported route from the external VRF Deletes a router interface from the host. Disables SNAT, if it is set on the interface. Deletes a loopback interface from the host, this removes the BVI interface from the local bridge. Also, delete the default route and SNAT address for the external interface. # Get all local IP addresses in the external VRF belonging # to the same external subnet as this router. # Check if atleast one local_ip matches a neutron assigned # external IP address of the router. # If there's no match, there are no valid local IPs within VPP. # While, in theory, there may be multiple IPs on an interface, # in practice, we only program one (and program additional # IPs via a local route). # TODO(ijw): this is a somewhat unclean way of removing IP # addresses attached to VPP interfaces that are in the # subnet of ext_intf_ip, I think. Unclear if this is the # right way to do that versus remembering the interface. # Is the local_ip valid? # For-else would mean no breaks i.e. no valid local_ips # If an external local route, we can safetly delete it from VPP # Don't delete any SNAT # External router is a loopback BVI. If a local route exists, # replace the BVI's IP address with its IP address. # Don't delete the SNAT. # Delete the IP address from the BVI. # Delete the local route # Set the router external interface corresponding to the local # route as non-local. 
# At this point, we can safetly remove both the SNAT and BVI # loopback interfaces as no local routes exist. # Get SNAT out interfaces whose IP addrs are overloaded # delete SNAT if set on this interface # Delete the external 1:N SNAT and default routes in all VRFs # for external router interface deletion # Delete external snat addresses for the router # Delete all exported routes into tenant VRFs belonging to this # external gateway # delete the default route in the default VRF # Delete all exported routes from this VRF # Delete the gateway IP address and the BVI interface if this is # the last IP address assigned on the BVI # Get all IP's assigned to the BVI interface # Dont' delete the BVI, only remove one IP from it # Last subnet assigned, delete the interface # Remove any local GPE mappings Associate any pending floating IP addresses. We may receive a request to associate a floating IP address, when the router BVI interfaces are not ready yet. So, we queue such requests and do the association when the router interfaces are ready. Associate the floating ip address and update state. # It is possible that during a VPP+agent restart scenario, the tenant's # VRF has not been set on the loopback # # For different tenants mapped to different VRFs, it is quite possible # that the same fixed IP addr is mapped to different floating IP addrs, # for example: # # (192.168.10.5, FIP1, VRF1) # (192.168.10.5, FIP2, VRF2) # # So, we check for (localip, extip, tenenat_vrf) in VPP before creating # the mapping. # Clear any dynamic NAT sessions for the 1:1 NAT to take effect Add the VPP configuration to support One-to-One SNAT. Arguments:- floating_ip: The UUID of the floating ip address floatingip_dict : The floating ip data # Associate the floating IP iff the router has established a tenant # VRF i.e. a vrf_id > 0 Remove the VPP configuration used by One-to-One SNAT. Arguments:- floating_ip: The UUID of the floating ip address to be disassociated. 
# Check if we know about this floating ip address # Delete the SNAT internal and external IP address mapping. Build and return a list of anti-spoofing rules. Returns a dict with two keys named: ingress_rules and egress_rules ingress_rules = a list of ingress rules egress_rules = a list of egress rules # Set is_permit = 2 if reflexive_acls and tcp/udp/ip traffic # Ingress filter rules to allow DHCP and ICMPv6 into VM # Allow incoming DHCP offer packets from dhcp servers # UDP src_port 67 (ipv4 dhcp server) and dst_port 68 (dhclient) # UDP src_port 547 (ipv6 dhserver) and dst_port 546 (ipv6 dclient) # Allow Icmpv6 Multicast listener Query, Report, Done (130,131,132) # neighbor soliciation (135) and neighbor advertisement (136) and # MLD2_REPORT (143) and ICMP_RA into the Instance # Egress spoof_filter rules from VM # Permit DHCP client packets (discovery + request) # UDP src_port 68 (ipv4 client) and dst_port 67 (ipv4 dhcp server) # UDP src_port 546 (ipv6 client) and dst_port 547 (ipv6 dhcp server) # Drop DHCP Offer packets originating from VM # src_port 67 and dst_port 68 # src_port 547 and dst_port 546 # Drop icmpv6 Router Advertisements from VMs. # Allow other outgoing icmpv6 packets # When packets are fragmented (as UCP(v6), ICMP(v6) and TCPv4 packets # all can be, VPP will match any fragment against the first rule # relating to that address and protocol. It ignores things like ports # and ICMP types because they aren't in the second and later fragments. # # If you want second and later fragments to get through, the first rule # that matches them *must* be a 'permit' rule. # # In our case it only happens for ICMPv6; we add a permit rule on an # invalid code to pre-empt the RA deny when matching fragments. # For TCPv4/v6, and ICMPv4, we don't have deny rules in spoof SG. 
so we # are good; # For UDPv4/v6, we do have a permit rule of DHCPv4/v6, so we are good; # For ICMPv6, we are adding a dummy permit rule to workaround this; # Permits ICMPv6 fragments while not permitting (valid) # packets (type 0 is invalid) # ... because this rule would otherwise match fragments, being # the first rule, and would deny them # Permit TCP port 80 traffic to 169.254.169.254/32 for metadata # TODO(ijw): make configurable? # TrunkWatcher thread's heartbeat interval # TODO(onong): make it configurable if need be # Add GPE key-watching, if required # These data structures are used as readiness indicators. # A port is only in here only if the attachment part of binding # has completed. # key: ifidx of port; value: (UUID, bound-callback, vpp-prop-dict) # key: UUID of port; value: ifidx # Members of this are ports requiring security groups with unsatisfied # requirements. # Sub-ports of a trunk with pending port bindings. # trunk_port ID => List(sub_ports awaiting binding) # When the agent is restarted, it could receive an etcd watch event # to bind subports even before the parent port itself is bound. This # dict keeps tracks of such sub_ports. They will be reconsidered # for binding after the parent is bound. # bound subports of parent ports # trunk_port ID => set(bound subports) # We also need to know if the vhostuser interface has seen a socket # connection: this tells us there's a state change, and there is # a state detection function on self.vppf. # Unbinding an unknown port Bind an interface as instructed by ML2 on this host. The interface as a network and binding type. Assuming the network as been dropped onto the physnet specified, bind that uplink to the interface in question by creating an interface of the appropriate form and propagating the network to it. This call also identifies if we should consider the interface fully up. This may happen now, or, asynchronously, later, depending on whether all the prerequisites are in place. 
That includes the behaviour of whatever's on the other end of the interface. # args['binding_type'] in ('vhostuser', 'tap'): # For GPE, fetch remote mappings from etcd for any "new" network # segments we will be binding to so we are aware of all the remote # overlay (mac) to underlay (IP) values # For GPE, a physnet value is not messaged by ML2 as it # is not specified for creating a gpe tenant network. Hence for # these net types we replace the physnet with the value of # gpe_locators, which stand for the physnet name. # Problems with the binding # We will never notify anyone this port is ready. # Store the binding information. We put this into # etcd when the interface comes up to show that things # are ready and expose it to curious operators, who may # be able to debug with it. This may not happen # immediately because the far end may not have connected. # 'None' is a special value indicating no port security On a restart, find bound ports and clean up unwanted config Does the following: - fixes uplinks - identifies the ports we bound previously - they may need removing or updating Ports intended to be bound will have .bind() called later in the resync, which will correcly populate VPPForwarder structures and fix bindings whose type has changed; ports that are no longer needed will be unbound. Returns a set of bound ports Apply non-secgroup security to a port This is an idempotent function to set up the port security (antispoof and allowed-address-pair) that can be determined solely from the data on the port itself. # TODO(ijw): this is a convenience for spotting L3 and DHCP # ports, but it's not the right way # If (security-groups and port_security) # are enabled and it's a vhostuser port # proceed to set L3/L2 ACLs, else skip security # Set Allowed address pairs and mac-spoof filter Check current port security state. See if any of the ports awaiting security group ACL population can now be secured. 
# TODO(ijw): could be more efficient in selecting ports to check Apply secgroups to a port if all constructs are available This is an idempotent function to set up port security. It relies on the pre-existence of the ACLs corresponding to security groups, so it may or may not be possible to apply security at this moment in time. If it is, the port is recorded as secure (allowing binding to complete), and if it isn't we will attempt to reapply as more security groups are created. It is reapplied if the security group list changes on the port. It is not reapplied if the security group content is changed, because the ACL number remains the same and therefore so does the port config. # The port was unbound before we could apply ACLs # TODO(ijw): this is a convenience for spotting L3 and DHCP # ports, but it's not the right way # (TODO(ijw) it's also the only reason we go to iface_state) # If security-groups are enabled and it's a port needing # security proceed to set L3/L2 ACLs, else skip security. # If security-groups are empty, apply the default spoof-acls. # This is the correct behavior when security-groups are enabled but # not set on a port. # port security off # The ACLs for secgroups are not yet ready # Leave ourselves in the pending list # Remove with no error if not present # The callback from VPP only knows the IP; convert # .. and note that we may not know the conversion # Not a port we know about Flag that an interface is connected, if it is This is a combination of 'we did our bit' and 'the other end connected'. These can happen in either order; if we resync, we recheck our binding but the other end may have connected already. This both tells Nova the interface is ready and brings the interface up in VPP. There is nothing wrong (other than a bit of inefficiency) in sending this to Nova multiple times; the watching driver may see the key write multiple times and will act accordingly. 
# Binding hasn't completed # For trunk sub-ports, it's the parent vhostuser port that needs to # be linked up # vhostuser connection that hasn't yet found a friend Add or replace a VPP ACL. Arguments: secgroup - OpenStack SecurityGroup ID data - SecurityGroup data from etcd # Create a rule for the remote_ip_prefix (CIDR) value # Create a rule for each ip address in the remote_group # Add the referencing secgroup ID to the remote-group lookup # data set. This enables the RemoteGroupWatcher thread to # lookup the secgroups that need to be updated for a # remote-group etcd watch event # VPP API requires the IP addresses to be represented in binary # At this point: # 1. we convert to the form VPP likes - a packed address # 2. we fix up the rule. At this point it's what Neutron gave us # and Neutron doesn't strictly check that the rule is a network # address compatible with the mask, but VPP cares. Our assumption # is that only bits significant relative to the mask are intended # to matter, though that's ill-defined in the Neutron API. # OpenStack should provide a network address here, but # doesn't correctly validate input. # Flatten ingress and egress rules Delete ACL on host. Arguments: secgroup - OpenStack SecurityGroup ID Deploy anti-spoofing ingress and egress ACLs on VPP. Tag ingress spoof acl on VPP with ID: FFFF:0 Tag egress spoof acl on VPP with ID: FFFF:1 Add Spoof ACL mapping with Key: "FFFF" Val: VppAcl(in_idx, out_idx) to secgroups mapping Set L2/L3 ACLs on port. 
Arguments:- mac_address - The mac_address assigned to the port fixed_ips - A list of dictionaries containing the fixed_ips assigned to the port identified by the key - 'ip_address' allowed_address_pairs - A list of allowed address pair attributes - Each address pair is a dict with keys: ip_address (required) mac_address (optional) sw_if_index - VPP vhostuser if_idx # Allowed mac_ip list to permit for DHCP request from 0.0.0.0 # Allow Ipv6 link local address for neighbor discovery # mac-ip-acls are egress only ACLs from an instance # A list of tuples of MAC Addrs. and their corresponding IP Addrs. # use the port-mac if a mac_address is not present in the allowed # address pair Load the sw_if_index to mac_ip_acl index mappings on vpp. Populates self.vppf.port_vpp_acls : {sw_if_index -> {'l23' : <macip_acl_index>}} # The acl position is the sw_if_index # Exclude invalid acl index # vpp_papi throws this error when no ACLs exist # cannot reference acl attribute - pass and exit Update the ACLs of all security groups that use a remote-group. When a remote_group to port association is changed, i.e. A new port is associated with (or) an existing port is removed, the agent needs to update the VPP ACLs belonging to all the security groups that use this remote-group in their rules. Since this is called from various threads it makes a new etcd client each call. # TODO(najoy):Update to the new per thread etcd-client model # TODO(ijw): all keys really present? # If the security group is deleted before the agent gets to it, # handle the exception. # EtcdListener Trunking section Try to bind subports awaiting their parent port to be bound. 
If the parent port - is bound - instance has connected to the other end of the vhostuser - security groups has been applied - is in admin UP state then: - bind the subports, and - set subport state to admin UP # Get the list of *currently* awaiting subports # (allows us to change and clear up the dict as we go through them) # Make sure parent port is really ready Return a list of subports to unbind for a parent port. subports :- A set of subports that need to be currently bound to the parent port. # unbind 'bound sub-ports' that are not in the current subports Return a list of subports to unbind for a parent port. subports :- A set of subports that need to be currently bound to the parent port. # remove ports from subports that are already bound and only bind the # new ports. Bind or unbind the subports of the parent ports as needed. To unbind all bound subports of a parent port, provide the parent_port argument with subports set to an empty list. Sample subports data structure: List of dicts [{"segmentation_id": 11, "uplink_seg_id": 149, "segmentation_type": "vlan", "uplink_seg_type": "vlan", "port_id": "9ee91c37-9150-49ff-9ea7-48e98547771a", "physnet": "physnet1", "allowed_address_pairs": [], "port_security_enabled": true, "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"], "bound_callback" : bind_notifier_object, }, {"segmentation_id": 12, "uplink_seg_id": 139, "segmentation_type": "vlan", "uplink_seg_type": "vlan", "port_id": "2b1a89ba-78f1-4350-b71a-7caf7f23cbcf", "physnet": "physnet1", "allowed_address_pairs": [], "port_security_enabled": true, "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"], "bound_callback" : bind_notifier_object, } ] # bind subports we are told to bind # Bring up the subport # Set port security on subport # unbind subports we are told to unbind # seconds # TODO(ijw): needs to remember its last tick on reboot, or # reconfigure from start (which means that VPP needs it # storing, so it's lost on reboot of VPP) # We need 
certain directories to exist so that we can write to # and watch them # py3 note: in py3 keys() does not return a list but the following # seems to work fine. Enclose in list() is problems arise. # We need to be wary not to hand the same client to multiple threads; # this etcd_helper and client dies here # load sw_if_index to macip acl index mappings # The security group watcher will load the secgroups before # this point (before the thread is spawned) - that's helpful, # because it means that the ports will be immediately createable # as the secgroups are already available. # Spawn trunk watcher if enabled # Spawn GPE watcher for GPE tenant networks # Spawning after the port bindings are done so that # the RouterWatcher doesn't do unnecessary work # The key that indicates to people that we're alive # (not that they care) Identify known ports in VPP We are beginning a resync because the agent has restarted. We should be fixing VPP with the least disruption possible so that traffic being passed by VPP on currently configured ports is not disrupted. As such, this goes to find correctly configured ports (which - if still required - will be left alone) and removes structures that have been partially or incorrectly set up. # Removing key == desire to unbind # Get seg_id and mac to delete any gpe mappings # On initial resync, this information may not # be available; also, the network may not # be gpe # Unlike bindings, unbindings are immediate. # Gone is fine; if we didn't delete it # it's no problem # Create or update == bind # In EtcdListener, bind *ensures correct # binding* and is idempotent. It will also # fix up security if the security state has # changed. NB most things will not change on # an update. # For backward comatibility reasons, 'plugtap' now means 'tap' # Post-17.07 'tap' is used, but this allows compatibility with # previously stored information in etcd. 
# NB(onong): VM's mac is needed to be programmed as the lisp local # eid for data flow in gpe networks across compute nodes so please # do not change the line below without proper consideration. # TODO(ijw) convert incoming to security fmt # While the bind might fail for one reason or another, # we have nothing we can do at this point. We simply # decline to notify Nova the port is ready. # For GPE networks, # write the remote mapping data to etcd to # propagate both the mac to underlay mapping and # mac to instance's IP (for ARP) mapping to all # agents that bind this segment using GPE # NB(onong): The VM's mac needs to be programmed in the remote # mappings. Without this no communication is possible between VMs # running on separate compute nodes. Start an etcd watcher for router operations. Starts an etcd watcher on the /router directory for this node. This watcher is responsible for consuming Neutron router CRUD operations. # TODO(ijw): consider how to remove GPE references from the router # code, as they *should* be dealt with by port binding functions. Parse the key into two tokens and return a tuple. The returned tuple is denoted by (token1, token2). If token1 == "floatingip", then token2 is the ID of the floatingip that is added or removed on the server. If, token1 == router_ID and token2 == port_ID of the router interface that is added or removed. If, token1 == 'ha', then we return that token for router watcher to action. Add a GPE mapping to the router's loopback mac-address. # GPE remote mappings are added for only the master L3 router, # if ha_enabled # Become master if a state is True, else become backup # Update remote mappings for GPE bound router ports # Delete the GPE mapping first as we need to lookup the # router interface mac-address from vppf # TODO(ijw): we should probably do the secgroup work # here rather than up front # create or update a secgroup == add_replace vpp acl Details on how the remote-group-id rules are updated by the vpp-agent. 
This thread watches the remote-group key space. When VM port associations to security groups are updated, this thread receives an etcd watch event from the server. From the watch event, the thread figures out the set of ports associated with the remote-group-id and the IP addresses of each port. After this, this thread updates two data structures. The first one is a dictionary named port_ips, used to keep track of the ports to their list of IP addresses. It has the port UUID as the key, and the value is it's set of IP addresses. The second DS is a dict named remote_group_ports. This is used to keep track of port memberships in remote-groups. The key is the remote_group_id and the value is the set of ports associated with it. These two dictionaries are updated by the thread whenever watch events are received, so the agent always has up to date information on ports, their IPs and the remote-groups association. The RemoteGroupWatcher thread then calls a method named update_remote_group_secgroups with the remote_group_id as the argument. This method figures out which secgroups need to be updated as a result of the watch event. This is done by looking up another dict named remote_group_secgroups that keeps track of all the secgroups that are referencing the remote-group-id inside their rules. The key is the remote-group, and the value is the set of secgroups that are dependent on it. The update_remote_group_secgroups method then reads the rules for each of these referencing security-groups and sends it to the method named acl_add_replace with the security-group-uuid and rules as the argument.The acl_add_replace method takes each rule that contains the remote-group-id and computes a product using the list of IP addresses belonging to all the ports in the remote-group. It then calls the acl_add_replace method in vppf to atomically update the relevant VPP ACLs for the security-group. 
# remote_group_key format is "remote_group_id/port_id" # Value is a list of IP addresses # The set of IP addresses configured on a port # The set of ports in a security-group # Remove the port_id from the remote_group Watches trunk parent/subport bindings on the host and takes actions. Trunk keyspace format. /networking-vpp/nodes/<node-name>/trunks/<UUID of the trunk> Sample data format: {"status": "ACTIVE", "name": "trunk-new", "admin_state_up": true, "sub_ports": [ {"segmentation_id": 11, "uplink_seg_id": 149, "segmentation_type": "vlan", "uplink_seg_type": "vlan", "port_id": "9ee91c37-9150-49ff-9ea7-48e98547771a", "physnet": "physnet1", "allowed_address_pairs": [], "mac_address": "fa:16:3e:c4:80:dc", "port_security_enabled": true, "fixed_ips": [{"subnet_id": "05cfd12c-9db8-4f55-a2b9-aca89f412932", "ip_address": "10.110.110.7"}], "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"]}, {"segmentation_id": 12, "uplink_seg_id": 139, "segmentation_type": "vlan", "uplink_seg_type": "vlan", "port_id": "2b1a89ba-78f1-4350-b71a-7caf7f23cbcf", "physnet": "physnet1", "allowed_address_pairs": [], "mac_address": "fa:17:3e:c4:80:de", "port_security_enabled": true, "fixed_ips": [{"subnet_id": "05cfd12c-9db8-4f55-a2b9-aca89f412932", "ip_address": "10.110.110.8"}], "security_groups": ["8d55a44a-935d-4296-99ab-b0749b725df4"]}, ]} How does it work? The ml2 server: 1) Writes above etcd key/value when a trunk port is bound on the host. 2) Updates the above value when subports on a bound trunk are updated. 3) Deletes the key when the trunk is unbound. The trunkwatcher receives the watch event and it figures out whether it should perform a bind or unbind action on the parent and its subport and performs it. Invoked every TRUNK_WATCHER_HEARTBEAT secs # Check if there are child ports to be bound and brought UP Bind and unbind sub-ports of the parent port. 
# Due to out-of-sequence etcd watch events during an agent restart, # we do not yet know at this point whether the parent port is setup. # So, we'll add it to the awaiting parents queue and reconsider it. # reconsider awaiting sub_ports Unbind all sub-ports and then unbind the parent port. # First, unbind all subports # Then, unbind the parent port if it has no subports A thread to return bind-complete notifications to the server. This notifies the completion of a bind by writing a state key with the details of VPP's config (the other end doesn't care about the content, only the key's presence, so this is purely a debugging issue) to etcd. # An infinite queue over which we receive notifications Queue a notification for sending to Nova Nova watches a key's existence before sending out bind events. We set the key, and use the value to store debugging information. # TODO(ijw): do we ever clean this space up? # We must keep running, but we don't expect problems # If there are problems, retry the notification later. # There's no issue if we do this multiple times. # VPP connect timeout in seconds # TODO(najoy): check if vpp is actually up General purpose entrypoint Sets up non-specific bits (the integration with OpenStack and its config, and so on). # Arguments, config files and options # General logging # Guru meditation support enabled Main function for VPP agent functionality. # Pull physnets out of config and interpret them # Deal with VPP-side setup # Convert to the minutes unit that VPP uses: # (we round *up*) # py3 note: using // since we want integer division # Deal with etcd-side setup # Do the work
| 1.878843
| 2
|
packages/pegasus-python/src/Pegasus/db/workflow/stampede_statistics.py
|
ahnitz/pegasus
| 127
|
6625885
|
<filename>packages/pegasus-python/src/Pegasus/db/workflow/stampede_statistics.py
"""
Library to generate statistics from the new Stampede 3.1 backend.
Usage::
stats = StampedeStatistics(connString='sqlite:///montage.db')
stats.initialize('unique_wf_uuid')
stats.set_job_filter('dax')
print stats.get_total_jobs_status()
print stats.get_total_succeeded_jobs_status()
stats.set_job_filter('dag')
print stats.get_total_jobs_status()
print stats.get_total_succeeded_jobs_status()
etc.
stats.close()
Constructor and initialize methods:
The constructor takes a required sqlalchemy connection string
as the first argument. The stats class will default to returning
data in the "expanded workflow" mode. To change this behavior
and only analyze a single workflow, set the optional arg:
expand_workflow = False
along with the connection string argument.
The initialize method is called with a single argument - the wf_uuid
of the desired "root workflow" whether returning data in expanded
mode or not. The method will return True or False if a query
exception is raised so the programmer can test for success before
calling the subsequent query methods. This method is intended
to be called once per object.
Job filtering:
Jobs can be filtered using any of the strings in the jobtype ENUM,
with the addition of the values 'all' and 'nonsub' which will
return all jobs and non-subworkflow jobs respectively. If the
filter is not explicitly set, it will default to the 'all' mode.
The desired filter can be set with the set_job_filter() method. After
setting this method, all subsequent calls to the query methods will
return results according to the filter. This can be set and reset
as many times as the user desires. There is an example of re/setting
the job filter in the usage section above. The query methods
will return different values after the filter is re/set.
Time filtering:
This behaves much like job filtering. For the runtime queries,
the time intervals 'month', 'week', 'day', and 'hour' can
be set using the set_time_filter() method. If this method
is not set, it will default to the 'month' interval for filtering.
Hostname filtering:
For the runtime queries the method set_host_filter() can be used to
filter by various hosts. This method differs from the job and time
filtering methods in that the argument can be either a string (for
a single hostname), or an array/list of hostnames for multiple
hostnames.
Example::
s.set_host_filter('butterfly.isi.edu')
or
s.set_host_filter(['engage-submit3.renci.org', 'node0012.palmetto.clemson.edu'])
Either one of these variations will work. The first variation will
only retrieve data for that one host, the second will return data
for both hosts. If this method is not set, no hostname filtering
will be done and information for all hosts will be returned.
Transformation filtering:
Transformation filtering works similarly to hostname filtering in
that it can accept a single string value or a array/list of strings.
However the set_transformation_filter() method accepts two keyword
arguments - 'include' and 'exclude'. Only one of these keywords can
be set per method call.
Example::
s.set_transformation_filter(include='pegasus::dirmanager')
s.set_transformation_filter(exclude=['dagman::post' , 'dagman::pre' ,'condor::dagman'])
etc.
This example demonstrates the two proper keyword invocations and
that either a string or list may be used. If this method is not
set, no filtering will be done and information for all transforms
will be returned. Calling this method with no arguments will
reset any previously set filters.
Return values from methods:
The return value types will vary from method to method. Most of
the methods will return a single integer or floating point number.
Methods which return rows from the DB (rather than just a number)
will return a list which can be interacted with in one of two
ways - either by array index (list of tuples) or by a named attr
(list of objects). The two following methods of interacting with
the same query results will both produce the same output:
Example::
for row in s.get_job_kickstart():
print row[0], row[1], row[2]
print row.job_id, row.job_name, row.kickstart
Either syntax will work. When using the named attribute method, the
attributes are the names of the columns/aliases in the SELECT
stanza of the query. If the row returned by the method is printed,
it will display as a tuple of results per row.
Methods::
get_sub_workflow_ids
get_descendant_workflow_ids
get_schema_version
get_total_jobs_status
get_total_succeeded_failed_jobs_status
get_total_succeeded_jobs_status
get_total_failed_jobs_status
get_total_jobs_retries
get_total_tasks_status
get_total_succeeded_tasks_status
get_total_failed_tasks_status
get_task_success_report
get_task_failure_report
get_total_tasks_retries
get_workflow_states
get_workflow_cum_job_wall_time
get_submit_side_job_wall_time
get_workflow_details
get_workflow_retries
get_job_statistics
get_job_states
get_job_instance_sub_wf_map
get_failed_job_instances
get_job_instance_info
get_job_name
get_job_site
get_job_kickstart
get_job_runtime
get_job_seqexec
get_condor_q_time
get_resource_delay
get_post_time
get_transformation_statistics
get_invocation_by_time
get_jobs_run_by_time
get_invocation_by_time_per_host
get_jobs_run_by_time_per_host
Methods listed in order of query list on wiki.
https://confluence.pegasus.isi.edu/display/pegasus/Pegasus+Statistics+Python+Version+Modified
"""
__author__ = "<NAME>"
import logging
from sqlalchemy import orm
from sqlalchemy.sql.expression import and_, case, cast, distinct, func, not_, or_
from sqlalchemy.types import Float, Integer
from Pegasus.db import connection
from Pegasus.db.errors import StampedeDBNotFoundError
from Pegasus.db.schema import *
# Main stats class.
class StampedeStatistics:
def __init__(self, connString, expand_workflow=True):
self.log = logging.getLogger(
"{}.{}".format(self.__module__, self.__class__.__name__)
)
try:
self.session = connection.connect(connString)
except connection.ConnectionError as e:
self.log.exception(e)
raise StampedeDBNotFoundError
self._expand = expand_workflow
self._root_wf_id = None
self._root_wf_uuid = None
self._job_filter_mode = None
self._time_filter_mode = None
self._host_filter = None
self._xform_filter = {"include": None, "exclude": None}
self._wfs = []
def initialize(self, root_wf_uuid=None, root_wf_id=None):
if root_wf_uuid is None and root_wf_id is None:
self.log.error("Either root_wf_uuid or root_wf_id is required")
raise ValueError("Either root_wf_uuid or root_wf_id is required")
q = self.session.query(Workflow.root_wf_id, Workflow.wf_id, Workflow.wf_uuid)
if root_wf_uuid:
q = q.filter(Workflow.wf_uuid == root_wf_uuid)
else:
q = q.filter(Workflow.wf_id == root_wf_id)
try:
result = q.one()
self._root_wf_id = result.wf_id
self._root_wf_uuid = result.wf_uuid
self._is_root_wf = result.root_wf_id == result.wf_id
except orm.exc.MultipleResultsFound as e:
self.log.error("Multiple results found for wf_uuid: %s", root_wf_uuid)
raise
except orm.exc.NoResultFound as e:
self.log.error("No results found for wf_uuid: %s", root_wf_uuid)
raise
self._wfs.insert(0, self._root_wf_id)
if self._expand:
"""
select parent_wf_id, wf_id from workflow where root_wf_id =
(select root_wf_id from workflow where wf_id=self._root_wf_id);
"""
sub_q = (
self.session.query(Workflow.root_wf_id)
.filter(Workflow.wf_id == self._root_wf_id)
.subquery("root_wf")
)
q = self.session.query(Workflow.parent_wf_id, Workflow.wf_id).filter(
Workflow.root_wf_id == sub_q.c.root_wf_id
)
# @tree will hold the entire sub-work-flow dependency structure.
tree = {}
for row in q.all():
parent_node = row.parent_wf_id
if parent_node in tree:
tree[parent_node].append(row.wf_id)
else:
tree[parent_node] = [row.wf_id]
self._get_descendants(tree, self._root_wf_id)
self.log.debug("Descendant workflow ids %s", self._wfs)
if not len(self._wfs):
self.log.error("No results found for wf_uuid: %s", root_wf_uuid)
raise ValueError("No results found for wf_uuid: %s", root_wf_uuid)
# Initialize filters with default value
self.set_job_filter()
self.set_time_filter()
self.set_host_filter()
self.set_transformation_filter()
return True
def _get_descendants(self, tree, wf_node):
"""
If the root_wf_uuid given to initialize function is not the UUID of the root work-flow, and
expand_workflow was set to True, then this recursive function determines all child work-flows.
@tree A dictionary when key is the parent_wf_id and value is a list of its child wf_id's.
@wf_node The node for which to determine descendants.
"""
if tree is None or wf_node is None:
raise ValueError("Tree, or node cannot be None")
if wf_node in tree:
self._wfs.extend(tree[wf_node])
for wf in tree[wf_node]:
self._get_descendants(tree, wf)
def close(self):
    """Close the underlying database session."""
    self.log.debug("close")
    self.session.close()
def set_job_filter(self, filter="all"):
    """
    Select which job types subsequent statistics queries will consider.

    Unknown values are rejected: an error is logged and the mode falls
    back to "all".

    :param filter: one of the mode names listed below (default "all").
    """
    modes = [
        "all",
        "nonsub",
        "subwf",
        "dax",
        "dag",
        "compute",
        "stage-in-tx",
        "stage-out-tx",
        "registration",
        "inter-site-tx",
        "create-dir",
        "staged-compute",
        "cleanup",
        "chmod",
    ]
    # Plain membership test; the previous code abused list.index() inside
    # try/except to test membership.
    if filter in modes:
        self._job_filter_mode = filter
        self.log.debug("Setting filter to: %s", filter)
    else:
        self._job_filter_mode = "all"
        self.log.error("Unknown job filter %s - setting to all", filter)
def set_time_filter(self, filter="month"):
    """
    Select the time granularity used by time-based statistics queries.

    Unknown values are rejected: an error is logged and the mode falls
    back to "month".

    :param filter: one of "month", "week", "day", "hour" (default "month").
    """
    modes = ["month", "week", "day", "hour"]
    # Plain membership test; the previous code abused list.index() inside
    # try/except to test membership.
    if filter in modes:
        self._time_filter_mode = filter
        self.log.debug("Setting filter to: %s", filter)
    else:
        self._time_filter_mode = "month"
        self.log.error("Unknown time filter %s - setting to month", filter)
def set_host_filter(self, host=None):
    """
    Restrict statistics queries to one or more hosts.

    The host argument can either be a string/single hostname or
    it can be a list/array of hostnames.  None disables host filtering.
    """
    self._host_filter = host
def set_transformation_filter(self, include=None, exclude=None):
    """
    Restrict statistics queries to include or exclude transformation types.

    Either of these args can either be a single string/xform type or
    it can be a list/array of xform types.

    Both arguments can not be set at the same time.  If they are,
    an error is logged and no transformation filtering is applied.
    """
    # Enforce the documented contract: the docstring has always promised
    # that setting both filters logs an error and disables filtering, but
    # no check was actually performed.
    if include is not None and exclude is not None:
        self.log.error(
            "Both include and exclude transformation filters set - ignoring both"
        )
        include = None
        exclude = None
    self._xform_filter["include"] = include
    self._xform_filter["exclude"] = exclude
#
# Pulls information about sub workflows
#
def get_sub_workflow_ids(self):
    """
    Returns info on child workflows only.

    :return: list of (wf_id, wf_uuid, dax_label) rows for the direct
        children of the selected root workflow.
    """
    q = self.session.query(Workflow.wf_id, Workflow.wf_uuid, Workflow.dax_label)
    q = q.filter(Workflow.parent_wf_id == self._root_wf_id)
    return q.all()
def get_descendant_workflow_ids(self):
    """
    Return (wf_id, wf_uuid) rows for every workflow in the tree rooted at
    the selected workflow, excluding the root itself.
    """
    q = self.session.query(Workflow.wf_id, Workflow.wf_uuid)
    q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    q = q.filter(Workflow.wf_id != self._root_wf_id)
    return q.all()
def get_schema_version(self):
    """Return the database schema version reported by the schema checker."""
    return self.s_check.check_version()
#
# Status of initially planned wf components.
#
#
# The following block of queries are documented here:
# https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary
# and
# https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file
#
def _dax_or_dag_cond(self, JobO=Job):
    """Condition matching sub-workflow jobs (type_desc 'dax' or 'dag')."""
    return or_(JobO.type_desc == "dax", JobO.type_desc == "dag")
def _get_job_filter(self, JobO=Job):
    """
    Translate the active job filter mode into a query condition.

    :param JobO: the Job mapper (or an alias of it) to build the
        condition against.
    :return: a filter clause for the current mode, or None for "all"
        (meaning: no filtering).
    """
    # All of these modes simply match the job's type_desc verbatim.
    plain_modes = (
        "dax",
        "dag",
        "compute",
        "stage-in-tx",
        "stage-out-tx",
        "registration",
        "inter-site-tx",
        "create-dir",
        "staged-compute",
        "cleanup",
        "chmod",
    )
    filters = {mode: JobO.type_desc == mode for mode in plain_modes}
    # Special modes: no filter at all, or (non-)membership in the
    # sub-workflow job types.
    filters["all"] = None
    filters["nonsub"] = not_(self._dax_or_dag_cond(JobO))
    filters["subwf"] = self._dax_or_dag_cond(JobO)
    return filters[self._job_filter_mode]
def _max_job_seq_subquery(self):
    """
    Creates the following subquery that is used in
    several queries:

    and jb_inst.job_submit_seq = (
    select max(job_submit_seq) from job_instance where job_id = jb_inst.job_id group by job_id
    )

    :return: a correlated subquery selecting the highest job_submit_seq
        (i.e. the latest retry) for the outer query's JobInstance.job_id.
    """
    JobInstanceSubMax = orm.aliased(JobInstance)
    sub_q = self.session.query(
        func.max(JobInstanceSubMax.job_submit_seq).label("max_id")
    )
    # Correlate against the outer JobInstance so this runs per job.
    sub_q = sub_q.filter(JobInstanceSubMax.job_id == JobInstance.job_id).correlate(
        JobInstance
    )
    sub_q = sub_q.group_by(JobInstanceSubMax.job_id).subquery()
    return sub_q
def get_total_jobs_status(self):
    """
    Count of all jobs (matching the active job filter) in the selected
    workflow scope.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaljobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaljobs
    """
    q = self.session.query(Job.job_id)
    # Scope: whole workflow tree, expanded id list, or single workflow,
    # depending on how initialize() classified the selection.
    if self._expand and self._is_root_wf:
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        q = q.filter(Workflow.wf_id.in_(self._wfs))
    else:
        q = q.filter(Workflow.wf_id == self._wfs[0])
    q = q.filter(Job.wf_id == Workflow.wf_id)
    if self._get_job_filter() is not None:
        q = q.filter(self._get_job_filter())
    return q.count()
def get_total_succeeded_failed_jobs_status(self, classify_error=False, tag=None):
    """
    Return one row with .succeeded and .failed counts, computed over the
    last instance (max job_submit_seq) of every job in scope.

    :param classify_error: when True, restrict the counts to jobs whose
        last instance carries the given tag with a positive count.
    :param tag: tag name used for error classification; required when
        classify_error is True.
    :return: a single row (succeeded, failed), or None when
        classify_error is requested without a tag.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceeded_failed_jobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededfailedjobs
    """
    # Sub-select: latest job_submit_seq per job, scoped and filtered.
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()
    # Outer query: classify each job's last instance by exit code.
    q = self.session.query(
        func.sum(case([(JobInstance.exitcode == 0, 1)], else_=0)).label(
            "succeeded"
        ),
        func.sum(case([(JobInstance.exitcode != 0, 1)], else_=0)).label("failed"),
    )
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)
    if classify_error:
        if tag is None:
            self.log.error("for error classification you need to specify tag")
            return None
        q = q.filter(JobInstance.job_instance_id == Tag.job_instance_id)
        q = q.filter(Tag.name == tag)
        q = q.filter(Tag.count > 0)
    return q.one()
def get_total_held_jobs(self):
    """
    Return the jobs whose last job instance is currently in state
    JOB_HELD, together with the hold reason.

    Equivalent SQL:

    SELECT DISTINCT count( job_instance_id) FROM
    jobstate j JOIN ( SELECT max(job_instance_id) as maxid FROM job_instance GROUP BY job_id) max_ji ON j.job_instance_id=max_ji.maxid
    WHERE j.state = 'JOB_HELD';

    :return: rows of (last_job_instance_id, job_id, job_name, reason).
    """
    # Sub-select: highest job_instance_id (latest instance) per job.
    sq_1 = self.session.query(
        func.max(JobInstance.job_instance_id).label("max_ji_id"),
        JobInstance.job_id.label("jobid"),
        Job.exec_job_id.label("jobname"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id)
    sq_1 = sq_1.group_by(JobInstance.job_id).subquery()
    q = self.session.query(
        distinct(Jobstate.job_instance_id.label("last_job_instance")),
        sq_1.c.jobid,
        sq_1.c.jobname,
        Jobstate.reason,
    )
    q = q.filter(Jobstate.state == "JOB_HELD")
    q = q.join(sq_1, Jobstate.job_instance_id == sq_1.c.max_ji_id)
    return q.all()
def get_total_succeeded_jobs_status(self):
    """
    Count of jobs whose last instance (max job_submit_seq) finished with
    exit code 0.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceededjobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededjobs
    """
    # Sub-select: latest job_submit_seq per job, scoped and filtered.
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()
    q = self.session.query(JobInstance.job_instance_id.label("last_job_instance"))
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)
    # exitcode == 0 and explicitly not NULL (NULL means still running).
    q = q.filter(JobInstance.exitcode == 0).filter(
        JobInstance.exitcode != None
    )  # noqa: E711
    return q.count()
def _get_total_failed_jobs_status(self):
    """
    Build (but do not execute) the query selecting the last instance of
    every job that finished with a non-zero exit code.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalfailedjobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalfailedjobs

    :return: a query object; callers use .count() or .all().
    """
    # Sub-select: latest job_submit_seq per job, scoped and filtered.
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()
    q = self.session.query(JobInstance.job_instance_id.label("last_job_instance"))
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)
    # Non-zero and explicitly not NULL (NULL means still running).
    q = q.filter(JobInstance.exitcode != 0).filter(
        JobInstance.exitcode != None
    )  # noqa: E711
    return q
def get_total_running_jobs_status(self):
    """
    Count of jobs whose last instance has no exit code yet, i.e. jobs
    that are still running.
    """
    # Sub-select: latest job_submit_seq per job, scoped and filtered.
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()
    q = self.session.query(JobInstance.job_instance_id.label("last_job_instance"))
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)
    # NULL exit code == job instance has not terminated yet.
    q = q.filter(JobInstance.exitcode == None)  # noqa: E711
    return q.count()
def get_total_failed_jobs_status(self):
    """Count of jobs whose last instance failed (non-zero exit code)."""
    q = self._get_total_failed_jobs_status()
    return q.count()
def _query_jobstate_for_instance(self, states):
    """
    The states arg is a list of strings.
    Returns an appropriate subquery selecting job_instance_ids whose
    jobstate matches any of the given state names, correlated against
    the outer JobInstance.
    """
    q = self.session.query(Jobstate.job_instance_id)
    q = q.filter(Jobstate.job_instance_id == JobInstance.job_instance_id).correlate(
        JobInstance
    )
    q = q.filter(Jobstate.state.in_(states)).subquery()
    return q
def get_total_jobs_retries(self):
    """
    Total number of job retries: the count of all job-instance rows
    minus the number of distinct jobs that ran at least once.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-TotalJobRetries
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-TotalJobRetries
    """
    # NOTE(review): a stray `self._dax_or_dag_cond()` call whose result was
    # discarded has been removed - it built a condition and threw it away.
    # Sub-select 1: total number of job-instance rows in scope.
    sq_1 = self.session.query(func.count(Job.job_id))
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Job.wf_id == Workflow.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.subquery()
    # Sub-select 2: number of distinct jobs that have any instance.
    sq_2 = self.session.query(func.count(distinct(JobInstance.job_id)))
    if self._expand and self._is_root_wf:
        sq_2 = sq_2.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_2 = sq_2.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_2 = sq_2.filter(Workflow.wf_id == self._wfs[0])
    sq_2 = sq_2.filter(Job.wf_id == Workflow.wf_id)
    sq_2 = sq_2.filter(Job.job_id == JobInstance.job_id)
    if self._get_job_filter() is not None:
        sq_2 = sq_2.filter(self._get_job_filter())
    sq_2 = sq_2.subquery()
    # Retries = total instances - distinct jobs.
    q = self.session.query(
        (sq_1.as_scalar() - sq_2.as_scalar()).label("total_job_retries")
    )
    return q.all()[0].total_job_retries
def get_total_tasks_status(self):
    """
    Count of all tasks (matching the active job filter) in the selected
    workflow scope.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaltask
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaltasks
    """
    q = self.session.query(Task.task_id)
    if self._expand and self._is_root_wf:
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        q = q.filter(Workflow.wf_id.in_(self._wfs))
    else:
        q = q.filter(Workflow.wf_id == self._wfs[0])
    q = q.filter(Task.wf_id == Workflow.wf_id)
    q = q.filter(Task.job_id == Job.job_id)
    # NOTE(review): the job filter is built against Task here (not Job),
    # so it matches Task.type_desc - presumably intentional; confirm.
    if self._get_job_filter(Task) is not None:
        q = q.filter(self._get_job_filter(Task))
    return q.count()
def _base_task_status_query_old(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceededtasks
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededtasks

    Obsolete builder for the task-status base query; retained only as an
    optimization reference (see note below).  Do not call from new code -
    use _base_task_statistics_query() instead.
    """
    # This query generation method is obsolete and is only being
    # kept for optimization reference.
    WorkflowSub1 = orm.aliased(Workflow, name="WorkflowSub1")
    JobInstanceSub1 = orm.aliased(JobInstance, name="JobInstanceSub1")
    JobSub1 = orm.aliased(Job, name="JobSub1")
    sq_1 = self.session.query(
        WorkflowSub1.wf_id.label("wid"),
        func.max(JobInstanceSub1.job_submit_seq).label("jss"),
        JobInstanceSub1.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(WorkflowSub1.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(WorkflowSub1.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(WorkflowSub1.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(WorkflowSub1.wf_id == JobSub1.wf_id)
    sq_1 = sq_1.filter(JobSub1.job_id == JobInstanceSub1.job_id)
    sq_1 = sq_1.group_by(JobInstanceSub1.job_id)
    if self._get_job_filter(JobSub1) is not None:
        sq_1 = sq_1.filter(self._get_job_filter(JobSub1))
    sq_1 = sq_1.subquery()
    JobInstanceSub2 = orm.aliased(JobInstance, name="JobInstanceSub2")
    sq_2 = self.session.query(
        sq_1.c.wid.label("wf_id"),
        JobInstanceSub2.job_instance_id.label("last_job_instance_id"),
    )
    sq_2 = sq_2.filter(JobInstanceSub2.job_id == sq_1.c.jobid)
    sq_2 = sq_2.filter(JobInstanceSub2.job_submit_seq == sq_1.c.jss)
    sq_2 = sq_2.subquery()
    q = self.session.query(Invocation.invocation_id)
    q = q.filter(Invocation.abs_task_id != None)  # noqa: E711
    q = q.filter(Invocation.job_instance_id == sq_2.c.last_job_instance_id)
    q = q.filter(Invocation.wf_id == sq_2.c.wf_id)
    # Calling wrapper methods would invoke like so:
    # q = self._base_task_status_query()
    # q = q.filter(Invocation.exitcode == 0)
    # return q.count()
    return q
def _base_task_statistics_query(self, success=True, pmc=False):
    """
    Build the per-workflow task-count query: for each workflow in scope,
    count distinct abs_task_ids whose invocation on the job's last
    instance succeeded (exitcode == 0) or failed (exitcode != 0).

    :param success: True to count succeeded tasks, False for failed.
    :param pmc: when True (PMC/pegasus-mpi-cluster mode), do not restrict
        to the last job instance - every instance's invocations count.
    :return: a query yielding (wf_id, count) rows grouped by workflow.
    """
    w = orm.aliased(Workflow, name="w")
    j = orm.aliased(Job, name="j")
    ji = orm.aliased(JobInstance, name="ji")
    # Inner select: one row per job (last instance) - or per job
    # instance when pmc is set (the maxjss column is then omitted).
    sq_1 = self.session.query(
        w.wf_id,
        j.job_id,
        ji.job_instance_id.label("jiid"),
        ji.job_submit_seq.label("jss"),
        func.max(ji.job_submit_seq).label("maxjss"),
    )
    if pmc:
        sq_1 = self.session.query(
            w.wf_id,
            j.job_id,
            ji.job_instance_id.label("jiid"),
            ji.job_submit_seq.label("jss"),
        )
    sq_1 = sq_1.join(j, w.wf_id == j.wf_id)
    sq_1 = sq_1.join(ji, j.job_id == ji.job_id)
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(w.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(w.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(w.wf_id == self._wfs[0])
    if not pmc:
        sq_1 = sq_1.group_by(j.job_id)
    if self._get_job_filter(j) is not None:
        sq_1 = sq_1.filter(self._get_job_filter(j))
    sq_1 = sq_1.subquery("t")
    # PM-713 - Change to func.count(distinct(Invocation.abs_task_id)) from func.count(Invocation.exitcode)
    sq_2 = self.session.query(
        sq_1.c.wf_id, func.count(distinct(Invocation.abs_task_id)).label("count")
    )
    sq_2 = sq_2.select_from(
        orm.join(sq_1, Invocation, sq_1.c.jiid == Invocation.job_instance_id)
    )
    if not pmc:
        # Keep only the job's last instance.
        sq_2 = sq_2.filter(sq_1.c.jss == sq_1.c.maxjss)
    sq_2 = sq_2.filter(Invocation.abs_task_id != None)  # noqa: E711
    if success:
        sq_2 = sq_2.filter(Invocation.exitcode == 0)
    else:
        sq_2 = sq_2.filter(Invocation.exitcode != 0)
    sq_2 = sq_2.group_by(sq_1.c.wf_id)
    return sq_2
def _task_statistics_query_sum(self, success=True, pmc=False):
    """Sum the per-workflow task counts into one total; 0 when empty."""
    s = self._base_task_statistics_query(success, pmc).subquery("tt")
    q = self.session.query(func.sum(s.c.count).label("task_count"))
    # SUM over an empty set yields NULL/None - report 0 instead.
    return q.one()[0] or 0
def get_total_succeeded_tasks_status(self, pmc=False):
    """Total number of succeeded tasks across the workflow scope."""
    return self._task_statistics_query_sum(True, pmc)
def get_total_failed_tasks_status(self):
    """Total number of failed tasks across the workflow scope."""
    return self._task_statistics_query_sum(False, False)
def get_task_success_report(self, pmc=False):
    """Per-workflow (wf_id, count) rows of succeeded tasks."""
    return self._base_task_statistics_query(True, pmc).all()
def get_task_failure_report(self):
    """Per-workflow (wf_id, count) rows of failed tasks."""
    return self._base_task_statistics_query(False, False).all()
def get_total_tasks_retries(self):
    """
    Total task retries: the number of invocation rows carrying an
    abs_task_id minus the number of distinct (wf_id, abs_task_id) pairs.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaltaskretries
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaltaskretries
    """
    sq_1 = self.session.query(
        Workflow.wf_id.label("wid"), Invocation.abs_task_id.label("tid")
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Job.wf_id == Workflow.wf_id)
    sq_1 = sq_1.filter(Invocation.wf_id == Workflow.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.filter(JobInstance.job_instance_id == Invocation.job_instance_id)
    sq_1 = sq_1.filter(Invocation.abs_task_id != None)  # noqa: E711
    # Every duplicate (wf_id, abs_task_id) row beyond the first is a
    # retry of that task; rows are hashable named tuples, so a set
    # replaces the previous manual counter + dict de-duplication loop.
    rows = sq_1.all()
    return len(rows) - len(set(rows))
#
# Run statistics
#
def get_workflow_states(self):
    """
    Return all workflow-state rows of the root workflow, ordered by
    restart count (i.e. in run order).

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Workflowwalltime
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowwalltime
    """
    q = self.session.query(
        Workflowstate.wf_id,
        Workflowstate.state,
        Workflowstate.timestamp,
        Workflowstate.restart_count,
        Workflowstate.status,
    )
    q = q.filter(Workflowstate.wf_id == self._root_wf_id).order_by(
        Workflowstate.restart_count
    )
    return q.all()
def get_workflow_cum_job_wall_time(self):
    """
    Cumulative remote job wall time, plus its split into goodput
    (exitcode == 0) and badput (exitcode > 0), each weighted by the job
    instance's multiplier factor.

    Equivalent SQL:

    select sum(remote_duration * multiplier_factor) FROM
    invocation as invoc, job_instance as ji WHERE
    invoc.task_submit_seq >= 0 and
    invoc.job_instance_id = ji.job_instance_id and
    invoc.wf_id in (1,2,3) and
    invoc.transformation <> 'condor::dagman'

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Workflowcumulativejobwalltime
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowcumulativejobwalltime

    :return: one row of (total, goodput, badput) floats (may be None
        values when there are no matching invocations).
    """
    q = self.session.query(
        cast(
            func.sum(Invocation.remote_duration * JobInstance.multiplier_factor),
            Float,
        ),
        cast(
            func.sum(
                case(
                    [
                        (
                            Invocation.exitcode == 0,
                            Invocation.remote_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("goodput"),
            Float,
        ),
        cast(
            func.sum(
                case(
                    [
                        (
                            Invocation.exitcode > 0,
                            Invocation.remote_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("badput"),
            Float,
        ),
    )
    # task_submit_seq >= 0 excludes pre/post-script pseudo invocations.
    q = q.filter(Invocation.task_submit_seq >= 0)
    q = q.filter(Invocation.job_instance_id == JobInstance.job_instance_id)
    if self._expand:
        q = q.filter(Invocation.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(Invocation.wf_id.in_(self._wfs))
    # The dagman wrapper itself is not a payload job.
    q = q.filter(Invocation.transformation != "condor::dagman")
    return q.first()
def get_summary_integrity_metrics(self):
    """
    Aggregate integrity metrics over the workflow scope, grouped by
    metric type only.

    :return: at most two rows (type 'compute' | 'check'), each with
        summed .duration and .count.
    """
    q = self.session.query(
        IntegrityMetrics.type,
        func.sum(IntegrityMetrics.duration).label("duration"),
        func.sum(IntegrityMetrics.count).label("count"),
    )
    q = q.group_by(IntegrityMetrics.type)
    if self._expand:
        q = q.filter(IntegrityMetrics.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(IntegrityMetrics.wf_id.in_(self._wfs))
    # at most two records grouped by type compute | check
    return q.all()
def get_tag_metrics(self, name):
    """
    Aggregate tag counts over the workflow scope for a single tag name.

    :param name: what type of tag to aggregate on.
    :return: (name, count) rows - at most one, given the name filter.
    """
    q = self.session.query(Tag.name, func.sum(Tag.count).label("count"))
    q = q.group_by(Tag.name)
    q = q.filter(Tag.name == name)
    if self._expand:
        q = q.filter(Tag.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(Tag.wf_id.in_(self._wfs))
    return q.all()
def get_integrity_metrics(self):
    """
    Aggregate integrity metrics over the workflow scope, grouped by
    metric type ('compute' | 'check') and file type ('input' | 'output').

    :return: rows of (type, file_type, duration, count) with summed
        duration and count per group.
    """
    q = self.session.query(
        IntegrityMetrics.type,
        IntegrityMetrics.file_type,
        func.sum(IntegrityMetrics.duration).label("duration"),
        func.sum(IntegrityMetrics.count).label("count"),
    )
    q = q.group_by(IntegrityMetrics.type)
    q = q.group_by(IntegrityMetrics.file_type)
    if self._expand:
        q = q.filter(IntegrityMetrics.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(IntegrityMetrics.wf_id.in_(self._wfs))
    # NOTE(review): a dead triple-quoted debug block (print statements
    # commented out as a bare string expression) was removed here.
    return q.all()
def get_submit_side_job_wall_time(self):
    """
    Cumulative job wall time as seen from the submit side (local
    durations weighted by multiplier factor), plus its goodput/badput
    split by exit code.

    Equivalent SQL:

    select sum(local_duration * multiplier_factor) FROM
    job_instance as jb_inst, job as jb WHERE
    jb_inst.job_id = jb.job_id and
    jb.wf_id in (1,2,3) and
    ((not (jb.type_desc ='dax' or jb.type_desc ='dag'))
    or
    ((jb.type_desc ='dax' or jb.type_desc ='dag') and jb_inst.subwf_id is NULL)
    )

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Cumulativejobwalltimeasseenfromsubmitside
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Cumulativejobwalltimeasseenfromsubmitside

    :return: one row of (wall_time, goodput, badput) floats.
    """
    q = self.session.query(
        cast(
            func.sum(JobInstance.local_duration * JobInstance.multiplier_factor),
            Float,
        ).label("wall_time"),
        cast(
            func.sum(
                case(
                    [
                        (
                            JobInstance.exitcode == 0,
                            JobInstance.local_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("goodput"),
            Float,
        ),
        cast(
            func.sum(
                case(
                    [
                        (
                            JobInstance.exitcode > 0,
                            JobInstance.local_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("badput"),
            Float,
        ),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    if self._expand:
        q = q.filter(Job.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(Job.wf_id.in_(self._wfs))
    if self._expand:
        # When expanding, count a sub-workflow job only if it has no
        # tracked sub-workflow (avoids double counting its children).
        d_or_d = self._dax_or_dag_cond()
        q = q.filter(
            or_(not_(d_or_d), and_(d_or_d, JobInstance.subwf_id == None))
        )  # noqa: E711
    return q.first()
def get_workflow_details(self):
    """
    Descriptive rows (ids, submit host/dir, planner info, DAX info) for
    every workflow currently in scope.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowdetails
    """
    q = self.session.query(
        Workflow.wf_id,
        Workflow.wf_uuid,
        Workflow.parent_wf_id,
        Workflow.root_wf_id,
        Workflow.dag_file_name,
        Workflow.submit_hostname,
        Workflow.submit_dir,
        Workflow.planner_arguments,
        Workflow.user,
        Workflow.grid_dn,
        Workflow.planner_version,
        Workflow.dax_label,
        Workflow.dax_version,
    )
    q = q.filter(Workflow.wf_id.in_(self._wfs))
    return q.all()
def get_workflow_retries(self):
    """
    Sum of the maximum restart counts of every workflow in scope.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowretries
    """
    # Per-workflow maximum restart count.
    sq_1 = self.session.query(func.max(Workflowstate.restart_count).label("retry"))
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    sq_1 = sq_1.filter(Workflowstate.wf_id == Workflow.wf_id)
    sq_1 = sq_1.group_by(Workflowstate.wf_id)
    sq_1 = sq_1.subquery()
    q = self.session.query(func.sum(sq_1.c.retry).label("total_retry"))
    return q.one().total_retry
#
# Job Statistics
# These queries are documented:
# https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file
#
def get_job_statistics(self):
    """
    Per-job-instance timing statistics (condor queue time, resource
    delay, runtime, kickstart time, post time, exit code, host, ...)
    for every job instance of the workflows in scope.

    Only meaningful for a single workflow: returns [] in expand mode.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-All
    """
    if self._expand:
        return []
    # sq_1: first GRID_SUBMIT/GLOBUS_SUBMIT/EXECUTE timestamp.
    sq_1 = self.session.query(func.min(Jobstate.timestamp))
    sq_1 = sq_1.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_1 = sq_1.filter(
        or_(
            Jobstate.state == "GRID_SUBMIT",
            Jobstate.state == "GLOBUS_SUBMIT",
            Jobstate.state == "EXECUTE",
        )
    )
    sq_1 = sq_1.subquery()
    # sq_2: SUBMIT timestamp.
    sq_2 = self.session.query(Jobstate.timestamp)
    sq_2 = sq_2.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_2 = sq_2.filter(Jobstate.state == "SUBMIT")
    sq_2 = sq_2.subquery()
    # sq_3: first EXECUTE timestamp.
    sq_3 = self.session.query(func.min(Jobstate.timestamp))
    sq_3 = sq_3.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_3 = sq_3.filter(Jobstate.state == "EXECUTE")
    sq_3 = sq_3.subquery()
    # sq_4: first GRID_SUBMIT/GLOBUS_SUBMIT timestamp.
    sq_4 = self.session.query(func.min(Jobstate.timestamp))
    sq_4 = sq_4.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_4 = sq_4.filter(
        or_(Jobstate.state == "GRID_SUBMIT", Jobstate.state == "GLOBUS_SUBMIT")
    )
    sq_4 = sq_4.subquery()
    # sq_5: total kickstart (remote) duration of real tasks.
    sq_5 = self.session.query(func.sum(Invocation.remote_duration))
    sq_5 = sq_5.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_5 = sq_5.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_5 = sq_5.filter(Invocation.task_submit_seq >= 0)
    sq_5 = sq_5.group_by().subquery()
    # sq_6: POST_SCRIPT_TERMINATED timestamp.
    sq_6 = self.session.query(Jobstate.timestamp)
    sq_6 = sq_6.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_6 = sq_6.filter(Jobstate.state == "POST_SCRIPT_TERMINATED")
    sq_6 = sq_6.subquery()
    # sq_7: last POST_SCRIPT_STARTED/JOB_TERMINATED timestamp.
    sq_7 = self.session.query(func.max(Jobstate.timestamp))
    sq_7 = sq_7.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_7 = sq_7.filter(
        or_(
            Jobstate.state == "POST_SCRIPT_STARTED",
            Jobstate.state == "JOB_TERMINATED",
        )
    )
    sq_7 = sq_7.subquery()
    # sq_8: worst invocation exit code (including the prescript).
    sq_8 = self.session.query(func.max(Invocation.exitcode))
    sq_8 = sq_8.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_8 = sq_8.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    # PM-704 the task submit sequence needs to be >= -1 to include prescript status
    sq_8 = sq_8.filter(Invocation.task_submit_seq >= -1)
    sq_8 = sq_8.group_by().subquery()
    # sq_9: hostname the instance ran on.
    JobInstanceSub = orm.aliased(JobInstance)
    sq_9 = self.session.query(Host.hostname)
    sq_9 = sq_9.filter(
        JobInstanceSub.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_9 = sq_9.filter(Host.host_id == JobInstanceSub.host_id)
    sq_9 = sq_9.subquery()
    # sq_10: kickstart duration weighted by multiplier factor.
    JI = orm.aliased(JobInstance)
    sq_10 = self.session.query(
        func.sum(Invocation.remote_duration * JI.multiplier_factor)
    )
    sq_10 = sq_10.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_10 = sq_10.filter(Invocation.job_instance_id == JI.job_instance_id)
    sq_10 = sq_10.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_10 = sq_10.filter(Invocation.task_submit_seq >= 0)
    sq_10 = sq_10.group_by().subquery()
    # sq_11: total remote CPU time of real tasks.
    sq_11 = self.session.query(func.sum(Invocation.remote_cpu_time))
    sq_11 = sq_11.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_11 = sq_11.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_11 = sq_11.filter(Invocation.task_submit_seq >= 0)
    sq_11 = sq_11.group_by().subquery()
    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        Job.exec_job_id.label("job_name"),
        JobInstance.site,
        cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("condor_q_time"),
        cast(sq_3.as_scalar() - sq_4.as_scalar(), Float).label("resource_delay"),
        cast(JobInstance.local_duration, Float).label("runtime"),
        cast(sq_5.as_scalar(), Float).label("kickstart"),
        cast(sq_6.as_scalar() - sq_7.as_scalar(), Float).label("post_time"),
        cast(JobInstance.cluster_duration, Float).label("seqexec"),
        sq_8.as_scalar().label("exit_code"),
        sq_9.as_scalar().label("host_name"),
        JobInstance.multiplier_factor,
        cast(sq_10.as_scalar(), Float).label("kickstart_multi"),
        sq_11.as_scalar().label("remote_cpu_time"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def _state_sub_q(self, states, function=None):
    """
    Build a correlated subquery over Jobstate.timestamp for the given
    states.

    :param states: list of jobstate names to match.
    :param function: None for the raw timestamp, or "max"/"min" for the
        corresponding aggregate.
    :return: a subquery correlated against the outer JobInstance.
    """
    sq = None
    if not function:
        sq = self.session.query(Jobstate.timestamp)
    elif function == "max":
        sq = self.session.query(func.max(Jobstate.timestamp))
    elif function == "min":
        sq = self.session.query(func.min(Jobstate.timestamp))
    sq = sq.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq = sq.filter(Jobstate.state.in_(states)).subquery()
    return sq
def get_job_states(self):
    """
    Per-job-instance state timeline: start time and duration of the
    pre-script, condor, grid, execute, kickstart and post-script phases,
    plus host and transformation names.

    Only meaningful for a single workflow: returns [] in expand mode.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-JobStates
    """
    if self._expand:
        return []
    # Hostname the instance ran on.
    sq_1 = (
        self.session.query(Host.hostname)
        .filter(Host.host_id == JobInstance.host_id)
        .correlate(JobInstance)
        .subquery()
    )
    # select min(timestamp) from jobstate where job_instance_id = jb_inst.job_instance_id
    # ) as jobS ,
    # (
    # select max(timestamp)-min(timestamp) from jobstate where job_instance_id = jb_inst.job_instance_id
    # ) as jobDuration,
    sq_jobS = self.session.query(func.min(Jobstate.timestamp))
    sq_jobS = (
        sq_jobS.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
        .correlate(JobInstance)
        .subquery()
    )
    sq_jobD = self.session.query(
        func.max(Jobstate.timestamp) - func.min(Jobstate.timestamp)
    )
    sq_jobD = (
        sq_jobD.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
        .correlate(JobInstance)
        .subquery()
    )
    # Per-phase timestamps via the shared jobstate subquery helper.
    sq_2 = self._state_sub_q(["PRE_SCRIPT_STARTED"])
    sq_3 = self._state_sub_q(["PRE_SCRIPT_TERMINATED"])
    sq_4 = self._state_sub_q(["PRE_SCRIPT_STARTED"])
    sq_5 = self._state_sub_q(["SUBMIT"])
    sq_6 = self._state_sub_q(["JOB_TERMINATED"])
    sq_7 = self._state_sub_q(["GRID_SUBMIT", "GLOBUS_SUBMIT"], "max")
    sq_8 = self._state_sub_q(["EXECUTE"], "min")
    sq_9 = self._state_sub_q(["EXECUTE", "SUBMIT"], "max")
    sq_10 = self._state_sub_q(["JOB_TERMINATED"])
    # Kickstart start time and total duration of real tasks.
    sq_11 = self.session.query(func.min(Invocation.start_time))
    sq_11 = sq_11.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_11 = sq_11.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_11 = sq_11.filter(Invocation.task_submit_seq >= 0)
    sq_11 = sq_11.group_by(Invocation.job_instance_id).subquery()
    sq_12 = self.session.query(func.sum(Invocation.remote_duration))
    sq_12 = sq_12.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_12 = sq_12.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_12 = sq_12.filter(Invocation.task_submit_seq >= 0)
    sq_12 = sq_12.group_by(Invocation.job_instance_id).subquery()
    sq_13 = self._state_sub_q(["POST_SCRIPT_STARTED", "JOB_TERMINATED"], "max")
    sq_14 = self._state_sub_q(["POST_SCRIPT_TERMINATED"])
    # All transformations of the instance except dagman's own pre/post.
    sq_15 = self.session.query(
        func.group_concat(func.distinct(Invocation.transformation))
    )
    sq_15 = sq_15.filter(Invocation.wf_id.in_(self._wfs))
    sq_15 = sq_15.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_15 = sq_15.filter(Invocation.transformation != "dagman::post")
    sq_15 = sq_15.filter(Invocation.transformation != "dagman::pre")
    sq_15 = sq_15.subquery()
    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        Job.exec_job_id.label("job_name"),
        JobInstance.site,
        sq_1.as_scalar().label("host_name"),
        cast(sq_jobS.as_scalar(), Float).label("jobS"),
        cast(sq_jobD.as_scalar(), Float).label("jobDuration"),
        cast(sq_2.as_scalar(), Float).label("pre_start"),
        cast(sq_3.as_scalar() - sq_4.as_scalar(), Float).label("pre_duration"),
        cast(sq_5.as_scalar(), Float).label("condor_start"),
        cast(sq_6.as_scalar() - sq_5.as_scalar(), Float).label("condor_duration"),
        cast(sq_7.as_scalar(), Float).label("grid_start"),
        cast(sq_8.as_scalar() - sq_7.as_scalar(), Float).label("grid_duration"),
        cast(sq_9.as_scalar(), Float).label("exec_start"),
        cast(sq_10.as_scalar() - sq_9.as_scalar(), Float).label("exec_duration"),
        cast(sq_11.as_scalar(), Float).label("kickstart_start"),
        cast(sq_12.as_scalar(), Float).label("kickstart_duration"),
        cast(sq_13.as_scalar(), Float).label("post_start"),
        cast(sq_14.as_scalar() - sq_13.as_scalar(), Float).label("post_duration"),
        sq_15.as_scalar().label("transformation"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_job_instance_sub_wf_map(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Subworkflowjobinstancesmapping

    Map sub-workflow (dax/dag) job instances to the wf_id of the
    sub-workflow they spawned.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    q = (
        self.session.query(JobInstance.job_instance_id, JobInstance.subwf_id)
        .filter(Job.wf_id.in_(self._wfs))
        .filter(Job.job_id == JobInstance.job_id)
        .filter(self._dax_or_dag_cond())
    )
    return q.all()
def get_failed_job_instances(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Failedjobinstances

    PM-752: reuse the exact query behind the failed-jobs count so the
    list and the count can never disagree.
    """
    return self._get_total_failed_jobs_status().all()
def get_plots_failed_job_instances(self, final=False, all_jobs=False):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Failedjobinstances
    used in the pegasus plots code. is deprecated

    Return (job_instance_id, job_submit_seq) rows for job instances that
    failed (non-NULL, non-zero exitcode).  Returns [] in expanded mode.

    :param final: when True, consider only the last submission attempt
        (max job_submit_seq) of each job.
    :param all_jobs: when False, exclude dax/dag jobs that actually
        spawned a sub-workflow (subwf_id is set).
    """
    if self._expand:
        return []
    d_or_d = self._dax_or_dag_cond()

    if not final:
        q = self.session.query(
            JobInstance.job_instance_id, JobInstance.job_submit_seq
        )
    else:
        # Only the latest attempt per job.
        q = self.session.query(
            JobInstance.job_instance_id, func.max(JobInstance.job_submit_seq)
        )

    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.filter(Job.job_id == JobInstance.job_id)
    if not all_jobs:
        # Keep non-subworkflow jobs, plus subworkflow jobs that never
        # actually started a sub-workflow.
        q = q.filter(
            or_(not_(d_or_d), and_(d_or_d, JobInstance.subwf_id == None))
        )  # noqa: E711
    # Failed means exitcode recorded and non-zero.
    q = q.filter(JobInstance.exitcode != 0).filter(
        JobInstance.exitcode != None
    )  # noqa: E711

    if final:
        q = q.group_by(JobInstance.job_id)

    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_job_instance_info(self, job_instance_id=None):
    """
    Job instance information. Pulls all or for one instance.
    https://confluence.pegasus.isi.edu/pages/viewpage.action?pageId=14876831

    Each sq_N below is a correlated scalar subquery evaluated per
    JobInstance row; the pre-script invocation is identified by
    task_submit_seq == -1.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    # sq_0: submit dir of the sub-workflow this instance spawned (if any).
    sq_0 = self.session.query(Workflow.submit_dir)
    sq_0 = sq_0.filter(Workflow.wf_id == JobInstance.subwf_id).correlate(
        JobInstance
    )
    sq_0 = sq_0.subquery()
    # sq_1..sq_4: name, submit file, executable and argv from the Job row.
    sq_1 = self.session.query(Job.exec_job_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_1 = sq_1.subquery()
    sq_2 = self.session.query(Job.submit_file)
    sq_2 = sq_2.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_2 = sq_2.subquery()
    sq_3 = self.session.query(Job.executable)
    sq_3 = sq_3.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_3 = sq_3.subquery()
    sq_4 = self.session.query(Job.argv)
    sq_4 = sq_4.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_4 = sq_4.subquery()
    # sq_5: submit dir of the root workflow (not correlated).
    sq_5 = self.session.query(Workflow.submit_dir)
    sq_5 = sq_5.filter(Workflow.wf_id == self._root_wf_id).subquery()
    # sq_6/sq_7: latest recorded jobstate for this instance.
    sq_6 = self.session.query(
        func.max(Jobstate.jobstate_submit_seq).label("max_job_submit_seq")
    )
    sq_6 = sq_6.filter(Jobstate.job_instance_id == job_instance_id)
    sq_6 = sq_6.subquery()
    sq_7 = self.session.query(Jobstate.state)
    sq_7 = sq_7.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_7 = sq_7.filter(Jobstate.jobstate_submit_seq == sq_6.as_scalar())
    sq_7 = sq_7.subquery()
    # sq_8/sq_9: pre-script executable and argv (task_submit_seq == -1).
    sq_8 = self.session.query(Invocation.executable)
    sq_8 = sq_8.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_8 = sq_8.filter(Invocation.task_submit_seq == -1)
    sq_8 = sq_8.subquery()
    sq_9 = self.session.query(Invocation.argv)
    sq_9 = sq_9.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_9 = sq_9.filter(Invocation.task_submit_seq == -1)
    sq_9 = sq_9.subquery()
    # sq_10: hostname the instance ran on.
    sq_10 = self.session.query(Host.hostname)
    sq_10 = sq_10.filter(Host.host_id == JobInstance.host_id).correlate(JobInstance)
    sq_10 = sq_10.subquery()

    q = self.session.query(
        JobInstance.job_instance_id,
        JobInstance.site,
        JobInstance.stdout_file,
        JobInstance.stderr_file,
        JobInstance.stdout_text,
        JobInstance.stderr_text,
        JobInstance.work_dir,
        sq_0.as_scalar().label("subwf_dir"),
        sq_1.as_scalar().label("job_name"),
        sq_2.as_scalar().label("submit_file"),
        sq_3.as_scalar().label("executable"),
        sq_4.as_scalar().label("argv"),
        sq_5.as_scalar().label("submit_dir"),
        sq_7.as_scalar().label("state"),
        sq_8.as_scalar().label("pre_executable"),
        sq_9.as_scalar().label("pre_argv"),
        sq_10.as_scalar().label("hostname"),
    )
    # With no job_instance_id the query returns every instance.
    if job_instance_id:
        q = q.filter(JobInstance.job_instance_id == job_instance_id)

    return q.all()
def get_invocation_info(self, ji_id=None):
    """Fetch per-invocation details for one job instance.

    Equivalent SQL::

        SELECT task_submit_seq, exitcode, executable, argv,
               transformation, abs_task_id
        FROM invocation WHERE job_instance_id = :ji_id AND wf_id IN (...)

    Returns [] in expanded mode or when no ji_id is given.
    """
    if self._expand or not ji_id:
        return []
    columns = (
        Invocation.task_submit_seq,
        Invocation.exitcode,
        Invocation.executable,
        Invocation.argv,
        Invocation.transformation,
        Invocation.abs_task_id,
    )
    q = (
        self.session.query(*columns)
        .filter(Invocation.job_instance_id == ji_id)
        .filter(Invocation.wf_id.in_(self._wfs))
    )
    return q.all()
def get_job_name(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Name

    One row per job instance: ids plus the executable job name,
    ordered by submit sequence.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    q = (
        self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            Job.exec_job_id.label("job_name"),
        )
        .filter(Job.job_id == JobInstance.job_id)
        .filter(Job.wf_id.in_(self._wfs))
        .order_by(JobInstance.job_submit_seq)
    )
    return q.all()
def get_job_site(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Site

    Execution site per job (grouped by job), ordered by submit
    sequence.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    q = (
        self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            JobInstance.site,
        )
        .filter(Job.wf_id.in_(self._wfs))
        .filter(Job.job_id == JobInstance.job_id)
        .group_by(Job.job_id)
        .order_by(JobInstance.job_submit_seq)
    )
    return q.all()
def get_job_kickstart(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Kickstart

    Per job instance, the summed kickstart (remote) duration of its real
    tasks, weighted by the instance's multiplier factor.  Returns [] in
    expanded mode.
    """
    if self._expand:
        return []
    # Correlated scalar subquery: sum(remote_duration * multiplier_factor)
    # over the real tasks (task_submit_seq >= 0; pre/post scripts use
    # negative sequence numbers) of each job instance.
    sq_1 = self.session.query(
        func.sum(Invocation.remote_duration * JobInstance.multiplier_factor)
    )
    sq_1 = sq_1.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_1 = sq_1.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_1 = sq_1.filter(Invocation.task_submit_seq >= 0)
    sq_1 = sq_1.group_by(Invocation.job_instance_id)
    sq_1 = sq_1.subquery()

    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        cast(sq_1.as_scalar(), Float).label("kickstart"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_job_runtime(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Runtime

    Submit-side (local) duration per job, one row per job, ordered by
    submit sequence.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    q = (
        self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            JobInstance.local_duration.label("runtime"),
        )
        .filter(Job.job_id == JobInstance.job_id)
        .filter(Job.wf_id.in_(self._wfs))
        .group_by(Job.job_id)
        .order_by(JobInstance.job_submit_seq)
    )
    return q.all()
def get_job_seqexec(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Seqexec

    Cluster duration for clustered (seqexec) jobs only, ordered by
    submit sequence.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    q = (
        self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            JobInstance.cluster_duration,
        )
        .filter(Job.job_id == JobInstance.job_id)
        .filter(Job.wf_id.in_(self._wfs))
        .filter(Job.clustered != 0)
        .order_by(JobInstance.job_submit_seq)
    )
    return q.all()
def get_condor_q_time(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-CondorQTime

    Per job instance, the time spent in the Condor queue: earliest of
    GRID_SUBMIT / GLOBUS_SUBMIT / EXECUTE minus the SUBMIT timestamp.
    Returns [] in expanded mode.
    """
    if self._expand:
        return []
    # sq_1: earliest timestamp at which the job left the local queue.
    sq_1 = self.session.query(func.min(Jobstate.timestamp))
    sq_1 = sq_1.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_1 = sq_1.filter(
        or_(
            Jobstate.state == "GRID_SUBMIT",
            Jobstate.state == "GLOBUS_SUBMIT",
            Jobstate.state == "EXECUTE",
        )
    )
    sq_1 = sq_1.subquery()
    # sq_2: the SUBMIT timestamp of the instance.
    sq_2 = self.session.query(Jobstate.timestamp)
    sq_2 = sq_2.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_2 = sq_2.filter(Jobstate.state == "SUBMIT")
    sq_2 = sq_2.subquery()

    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("condor_q_time"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_resource_delay(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Resource

    Per job instance, the delay between remote submission and execution:
    earliest EXECUTE timestamp minus the GRID_SUBMIT / GLOBUS_SUBMIT
    timestamp.  Returns [] in expanded mode.
    """
    if self._expand:
        return []
    # sq_1: first EXECUTE timestamp for the instance.
    sq_1 = self.session.query(func.min(Jobstate.timestamp))
    sq_1 = sq_1.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_1 = sq_1.filter(Jobstate.state == "EXECUTE")
    sq_1 = sq_1.subquery()
    # sq_2: the remote-submission timestamp.
    sq_2 = self.session.query(Jobstate.timestamp)
    sq_2 = sq_2.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_2 = sq_2.filter(
        or_(Jobstate.state == "GRID_SUBMIT", Jobstate.state == "GLOBUS_SUBMIT")
    )
    sq_2 = sq_2.subquery()

    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("resource_delay"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_post_time(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Post

    Per job instance, the post-script duration: POST_SCRIPT_TERMINATED
    timestamp minus the later of POST_SCRIPT_STARTED / JOB_TERMINATED.
    Returns [] in expanded mode.
    """
    if self._expand:
        return []
    # sq_1: when the post script finished.
    sq_1 = self.session.query(Jobstate.timestamp)
    sq_1 = sq_1.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_1 = sq_1.filter(Jobstate.state == "POST_SCRIPT_TERMINATED")
    sq_1 = sq_1.subquery()
    # sq_2: when the post script effectively started (latest of the two
    # candidate events).
    sq_2 = self.session.query(func.max(Jobstate.timestamp))
    sq_2 = sq_2.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_2 = sq_2.filter(
        or_(
            Jobstate.state == "POST_SCRIPT_STARTED",
            Jobstate.state == "JOB_TERMINATED",
        )
    )
    sq_2 = sq_2.subquery()

    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("post_time"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
#
# This query documented:
# https://confluence.pegasus.isi.edu/display/pegasus/Transformation+Statistics+file
#
def get_transformation_statistics(self):
    """
    SELECT transformation,
           count(invocation_id) as count,
           min(remote_duration * multiplier_factor) as min,
           count(CASE WHEN (invoc.exitcode = 0 and invoc.exitcode is NOT NULL) THEN invoc.exitcode END) AS success,
           count(CASE WHEN (invoc.exitcode != 0 and invoc.exitcode is NOT NULL) THEN invoc.exitcode END) AS failure,
           max(remote_duration * multiplier_factor) as max,
           avg(remote_duration * multiplier_factor) as avg,
           sum(remote_duration * multiplier_factor) as sum
    FROM
        invocation as invoc, job_instance as ji WHERE
        invoc.job_instance_id = ji.job_instance_id and
        invoc.wf_id IN (1,2,3) GROUP BY transformation

    Aggregate runtime / maxrss / avg_cpu statistics per transformation.
    Rows are additionally split by the computed "type" column, so each
    transformation may yield up to two rows: one aggregating its
    successful invocations and one its failed invocations.
    """
    q = self.session.query(
        Invocation.transformation,
        # Classify each invocation; also part of the GROUP BY below.
        case([(Invocation.exitcode == 0, "successful")], else_="failed").label(
            "type"
        ),
        func.count(Invocation.invocation_id).label("count"),
        func.count(case([(Invocation.exitcode == 0, Invocation.exitcode)])).label(
            "success"
        ),
        func.count(case([(Invocation.exitcode != 0, Invocation.exitcode)])).label(
            "failure"
        ),
        # runtime (remote duration scaled by the instance multiplier)
        cast(
            func.min(Invocation.remote_duration * JobInstance.multiplier_factor),
            Float,
        ).label("min"),
        cast(
            func.max(Invocation.remote_duration * JobInstance.multiplier_factor),
            Float,
        ).label("max"),
        cast(
            func.avg(Invocation.remote_duration * JobInstance.multiplier_factor),
            Float,
        ).label("avg"),
        cast(
            func.sum(Invocation.remote_duration * JobInstance.multiplier_factor),
            Float,
        ).label("sum"),
        # maxrss (peak resident set size)
        func.min(Invocation.maxrss).label("min_maxrss"),
        func.max(Invocation.maxrss).label("max_maxrss"),
        cast(func.avg(Invocation.maxrss), Float,).label("avg_maxrss"),
        # avg_cpu (average CPU utilization)
        cast(func.min(Invocation.avg_cpu), Float,).label("min_avg_cpu"),
        cast(func.max(Invocation.avg_cpu), Float,).label("max_avg_cpu"),
        cast(func.avg(Invocation.avg_cpu), Float,).label("avg_avg_cpu"),
    )

    q = q.filter(Invocation.job_instance_id == JobInstance.job_instance_id)
    q = q.filter(Invocation.wf_id.in_(self._wfs))
    q = q.group_by(Invocation.transformation).group_by("type")
    q = q.order_by(Invocation.transformation)

    return q.all()
#
# Runtime queries
# https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
#
def _get_date_divisors(self):
vals = {"month": 2629743, "week": 604800, "day": 86400, "hour": 3600}
return vals[self._time_filter_mode]
def _get_host_filter(self):
if self._host_filter is None:
return None
elif isinstance(self._host_filter, str):
return Host.hostname == self._host_filter
elif isinstance(self._host_filter, type([])):
return Host.hostname.in_(self._host_filter)
else:
return None
def _get_xform_filter(self):
if (
self._xform_filter["include"] is not None
and self._xform_filter["exclude"] is not None
):
self.log.error(
"Can't set both transform include and exclude - reset s.set_transformation_filter()"
)
return None
elif (
self._xform_filter["include"] is None
and self._xform_filter["exclude"] is None
):
return None
elif self._xform_filter["include"] is not None:
if isinstance(self._xform_filter["include"], str):
return Invocation.transformation == self._xform_filter["include"]
elif isinstance(self._xform_filter["include"], type([])):
return Invocation.transformation.in_(self._xform_filter["include"])
else:
return None
elif self._xform_filter["exclude"] is not None:
if isinstance(self._xform_filter["exclude"], str):
return Invocation.transformation != self._xform_filter["exclude"]
elif isinstance(self._xform_filter["exclude"], type([])):
return not_(
Invocation.transformation.in_(self._xform_filter["exclude"])
)
else:
return None
else:
return None
def get_invocation_by_time(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries

    Invocation count and total remote runtime, bucketed by the current
    time-filter interval.
    """
    bucket = cast(
        Invocation.start_time / self._get_date_divisors(), Integer
    ).label("date_format")
    q = self.session.query(
        bucket,
        func.count(Invocation.invocation_id).label("count"),
        cast(func.sum(Invocation.remote_duration), Float).label("total_runtime"),
    )
    q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    q = q.filter(Invocation.wf_id == Workflow.wf_id)
    # Build the criterion once instead of twice.
    xform_criterion = self._get_xform_filter()
    if xform_criterion is not None:
        q = q.filter(xform_criterion)
    return q.group_by("date_format").order_by("date_format").all()
def get_jobs_run_by_time(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries

    Job-instance count and total local runtime, bucketed by the current
    time-filter interval; the bucket is taken from the EXECUTE jobstate
    timestamp.  Honors the job filter.
    """
    q = self.session.query(
        (cast(Jobstate.timestamp / self._get_date_divisors(), Integer)).label(
            "date_format"
        ),
        func.count(JobInstance.job_instance_id).label("count"),
        cast(func.sum(JobInstance.local_duration), Float).label("total_runtime"),
    )
    q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    q = q.filter(Workflow.wf_id == Job.wf_id)
    q = q.filter(Job.job_id == JobInstance.job_id)
    q = q.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
    q = q.filter(Jobstate.state == "EXECUTE")
    # Instances with no recorded duration would skew the sums.
    q = q.filter(JobInstance.local_duration != None)  # noqa: E711
    if self._get_job_filter() is not None:
        q = q.filter(self._get_job_filter())
    q = q.group_by("date_format").order_by("date_format")
    return q.all()
def get_invocation_by_time_per_host(self, host=None):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries

    Like get_invocation_by_time but additionally broken down by the
    hostname each invocation ran on.  Honors the host and
    transformation filters.

    NOTE(review): the ``host`` parameter is never referenced in this
    body -- filtering comes from set_host_filter() instead; confirm it
    is kept only for interface compatibility.
    """
    q = self.session.query(
        (cast(Invocation.start_time / self._get_date_divisors(), Integer)).label(
            "date_format"
        ),
        Host.hostname.label("host_name"),
        func.count(Invocation.invocation_id).label("count"),
        cast(func.sum(Invocation.remote_duration), Float).label("total_runtime"),
    )
    q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    q = q.filter(Invocation.wf_id == Workflow.wf_id)
    q = q.filter(JobInstance.job_instance_id == Invocation.job_instance_id)
    q = q.filter(JobInstance.host_id == Host.host_id)
    if self._get_host_filter() is not None:
        q = q.filter(self._get_host_filter())
    if self._get_xform_filter() is not None:
        q = q.filter(self._get_xform_filter())
    q = q.group_by("date_format", "host_name").order_by("date_format")
    return q.all()
def get_jobs_run_by_time_per_host(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries

    Like get_jobs_run_by_time but additionally broken down by the
    hostname each job instance ran on.  Honors the host and job filters.
    """
    q = self.session.query(
        (cast(Jobstate.timestamp / self._get_date_divisors(), Integer)).label(
            "date_format"
        ),
        Host.hostname.label("host_name"),
        func.count(JobInstance.job_instance_id).label("count"),
        cast(func.sum(JobInstance.local_duration), Float).label("total_runtime"),
    )
    q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    q = q.filter(Workflow.wf_id == Job.wf_id)
    q = q.filter(Job.job_id == JobInstance.job_id)
    q = q.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
    q = q.filter(Jobstate.state == "EXECUTE")
    q = q.filter(JobInstance.host_id == Host.host_id)
    if self._get_host_filter() is not None:
        q = q.filter(self._get_host_filter())
    if self._get_job_filter() is not None:
        q = q.filter(self._get_job_filter())
    q = q.group_by("date_format", "host_name").order_by("date_format")
    return q.all()
|
# packages/pegasus-python/src/Pegasus/db/workflow/stampede_statistics.py
"""
Library to generate statistics from the new Stampede 3.1 backend.
Usage::
stats = StampedeStatistics(connString='sqlite:///montage.db')
stats.initialize('unique_wf_uuid')
stats.set_job_filter('dax')
print stats.get_total_jobs_status()
print stats.get_total_succeeded_jobs_status()
stats.set_job_filter('dag')
print stats.get_total_jobs_status()
print stats.get_total_succeeded_jobs_status()
etc.
stats.close()
Constructor and initialize methods:
The constructor takes a required sqlalchemy connection string
as the first argument. The stats class will default to returning
data in the "expanded workflow" mode. To change this behavior
and only analyze a single workflow set the optional arg:
expand_workflow = False
along with the connection string argument.
The initialize method is called with a single argument - the wf_uuid
of the desired "root workflow" whether returning data in expanded
mode or not. The method will return True or False if a query
exception is raised so the programmer can test for success before
calling the subsequent query methods. This method is intended
to be called once per object.
Job filtering:
Jobs can be filtered using any of the strings in the jobtype ENUM,
with the addition of the values 'all' and 'nonsub' which will
return all jobs and non-subworkflow jobs respectively. If the
filter is not explicitly set, it will default to the 'all' mode.
The desired filter can be set with the set_job_filter() method. After
setting this method, all subsequent calls to the query methods will
return results according to the filter. This can be set and reset
as many times as the user desires. There is an example of re/setting
the job filter in the usage section above. The query methods
will return different values after the filter is re/set.
Time filtering:
This behaves much like job filtering. For the runtime queries,
the time intervals 'month', 'week', 'day', and 'hour' can
be set using the set_time_filter() method. If this method
is not set, it will default to the 'month' interval for filtering.
Hostname filtering:
For the runtime queries the method set_host_filter() can be used to
filter by various hosts. This method differs from the job and time
filtering methods in that the argument can be either a string (for
a single hostname), or an array/list of hostnames for multiple
hostnames.
Example::
s.set_host_filter('butterfly.isi.edu')
or
s.set_host_filter(['engage-submit3.renci.org', 'node0012.palmetto.clemson.edu'])
Either one of these variations will work. The first variation will
only retrieve data for that one host, the second will return data
for both hosts. If this method is not set, no hostname filtering
will be done and information for all hosts will be returned.
Transformation filtering:
Transformation filtering works similarly to hostname filtering in
that it can accept a single string value or a array/list of strings.
However the set_transformation_filter() method accepts two keyword
arguments - 'include' and 'exclude'. Only one of these keywords can
be set per method call.
Example::
s.set_transformation_filter(include='pegasus::dirmanager')
s.set_transformation_filter(exclude=['dagman::post' , 'dagman::pre' ,'condor::dagman'])
etc.
This example demonstrates the two proper keyword invocations and
that either a string or list may be used. If this method is not
set, no filtering will be done and information for all transforms
will be returned. Calling this method with no arguments will
reset any previously set filters.
Return values from methods:
The return value types will vary from method to method. Most of
the methods will return a single integer or floating point number.
Methods which return rows from the DB (rather than just a number)
will return a list which can be interacted with in one of two
ways - either by array index (list of tuples) or by a named attr
(list of objects). The two following methods of interacting with
the same query results will both produce the same output:
Example::
for row in s.get_job_kickstart():
print row[0], row[1], row[2]
print row.job_id, row.job_name, row.kickstart
Either syntax will work. When using the named attribute method, the
attributes are the names of the columns/aliases in the SELECT
stanza of the query. If the row returned by the method is printed,
it will display as a tuple of results per row.
Methods::
get_sub_workflow_ids
get_descendant_workflow_ids
get_schema_version
get_total_jobs_status
get_total_succeeded_failed_jobs_status
get_total_succeeded_jobs_status
get_total_failed_jobs_status
get_total_jobs_retries
get_total_tasks_status
get_total_succeeded_tasks_status
get_total_failed_tasks_status
get_task_success_report
get_task_failure_report
get_total_tasks_retries
get_workflow_states
get_workflow_cum_job_wall_time
get_submit_side_job_wall_time
get_workflow_details
get_workflow_retries
get_job_statistics
get_job_states
get_job_instance_sub_wf_map
get_failed_job_instances
get_job_instance_info
get_job_name
get_job_site
get_job_kickstart
get_job_runtime
get_job_seqexec
get_condor_q_time
get_resource_delay
get_post_time
get_transformation_statistics
get_invocation_by_time
get_jobs_run_by_time
get_invocation_by_time_per_host
get_jobs_run_by_time_per_host
Methods listed in order of query list on wiki.
https://confluence.pegasus.isi.edu/display/pegasus/Pegasus+Statistics+Python+Version+Modified
"""
__author__ = "<NAME>"
import logging
from sqlalchemy import orm
from sqlalchemy.sql.expression import and_, case, cast, distinct, func, not_, or_
from sqlalchemy.types import Float, Integer
from Pegasus.db import connection
from Pegasus.db.errors import StampedeDBNotFoundError
from Pegasus.db.schema import *
# Main stats class.
class StampedeStatistics:
def __init__(self, connString, expand_workflow=True):
    """Connect to the Stampede database.

    :param connString: SQLAlchemy connection URL.
    :param expand_workflow: when True (default) queries cover the root
        workflow plus all of its descendant sub-workflows; when False
        only the single workflow given to initialize() is analyzed.
    :raises StampedeDBNotFoundError: if the connection cannot be made.
    """
    self.log = logging.getLogger(
        "{}.{}".format(self.__module__, self.__class__.__name__)
    )
    try:
        self.session = connection.connect(connString)
    except connection.ConnectionError as e:
        self.log.exception(e)
        raise StampedeDBNotFoundError

    self._expand = expand_workflow

    # Populated by initialize().
    self._root_wf_id = None
    self._root_wf_uuid = None
    # Filter state; reset to defaults by initialize() via the setters.
    self._job_filter_mode = None
    self._time_filter_mode = None
    self._host_filter = None
    self._xform_filter = {"include": None, "exclude": None}

    # wf_ids of the workflows covered by subsequent queries.
    self._wfs = []
def initialize(self, root_wf_uuid=None, root_wf_id=None):
if root_wf_uuid is None and root_wf_id is None:
self.log.error("Either root_wf_uuid or root_wf_id is required")
raise ValueError("Either root_wf_uuid or root_wf_id is required")
q = self.session.query(Workflow.root_wf_id, Workflow.wf_id, Workflow.wf_uuid)
if root_wf_uuid:
q = q.filter(Workflow.wf_uuid == root_wf_uuid)
else:
q = q.filter(Workflow.wf_id == root_wf_id)
try:
result = q.one()
self._root_wf_id = result.wf_id
self._root_wf_uuid = result.wf_uuid
self._is_root_wf = result.root_wf_id == result.wf_id
except orm.exc.MultipleResultsFound as e:
self.log.error("Multiple results found for wf_uuid: %s", root_wf_uuid)
raise
except orm.exc.NoResultFound as e:
self.log.error("No results found for wf_uuid: %s", root_wf_uuid)
raise
self._wfs.insert(0, self._root_wf_id)
if self._expand:
"""
select parent_wf_id, wf_id from workflow where root_wf_id =
(select root_wf_id from workflow where wf_id=self._root_wf_id);
"""
sub_q = (
self.session.query(Workflow.root_wf_id)
.filter(Workflow.wf_id == self._root_wf_id)
.subquery("root_wf")
)
q = self.session.query(Workflow.parent_wf_id, Workflow.wf_id).filter(
Workflow.root_wf_id == sub_q.c.root_wf_id
)
# @tree will hold the entire sub-work-flow dependency structure.
tree = {}
for row in q.all():
parent_node = row.parent_wf_id
if parent_node in tree:
tree[parent_node].append(row.wf_id)
else:
tree[parent_node] = [row.wf_id]
self._get_descendants(tree, self._root_wf_id)
self.log.debug("Descendant workflow ids %s", self._wfs)
if not len(self._wfs):
self.log.error("No results found for wf_uuid: %s", root_wf_uuid)
raise ValueError("No results found for wf_uuid: %s", root_wf_uuid)
# Initialize filters with default value
self.set_job_filter()
self.set_time_filter()
self.set_host_filter()
self.set_transformation_filter()
return True
def _get_descendants(self, tree, wf_node):
"""
If the root_wf_uuid given to initialize function is not the UUID of the root work-flow, and
expand_workflow was set to True, then this recursive function determines all child work-flows.
@tree A dictionary when key is the parent_wf_id and value is a list of its child wf_id's.
@wf_node The node for which to determine descendants.
"""
if tree is None or wf_node is None:
raise ValueError("Tree, or node cannot be None")
if wf_node in tree:
self._wfs.extend(tree[wf_node])
for wf in tree[wf_node]:
self._get_descendants(tree, wf)
def close(self):
    """Close the underlying DB session; the object is unusable afterwards."""
    self.log.debug("close")
    self.session.close()
def set_job_filter(self, filter="all"):
modes = [
"all",
"nonsub",
"subwf",
"dax",
"dag",
"compute",
"stage-in-tx",
"stage-out-tx",
"registration",
"inter-site-tx",
"create-dir",
"staged-compute",
"cleanup",
"chmod",
]
try:
modes.index(filter)
self._job_filter_mode = filter
self.log.debug("Setting filter to: %s", filter)
except Exception:
self._job_filter_mode = "all"
self.log.error("Unknown job filter %s - setting to all", filter)
def set_time_filter(self, filter="month"):
modes = ["month", "week", "day", "hour"]
try:
modes.index(filter)
self._time_filter_mode = filter
self.log.debug("Setting filter to: %s", filter)
except Exception:
self._time_filter_mode = "month"
self.log.error("Unknown time filter %s - setting to month", filter)
def set_host_filter(self, host=None):
"""
The host argument can either be a string/single hostname or
it can be a list/array of hostnames.
"""
self._host_filter = host
def set_transformation_filter(self, include=None, exclude=None):
"""
Either of these args can either be a single string/xform type or
it can be a list/array of xform types.
Both arguments can not be set at the same time. If they are,
the program will log an error and not do any filtering.
"""
self._xform_filter["include"] = include
self._xform_filter["exclude"] = exclude
#
# Pulls information about sub workflows
#
def get_sub_workflow_ids(self):
    """Return (wf_id, wf_uuid, dax_label) for direct child workflows only."""
    q = (
        self.session.query(Workflow.wf_id, Workflow.wf_uuid, Workflow.dax_label)
        .filter(Workflow.parent_wf_id == self._root_wf_id)
    )
    return q.all()
def get_descendant_workflow_ids(self):
    """Return (wf_id, wf_uuid) for every descendant workflow, root excluded."""
    q = (
        self.session.query(Workflow.wf_id, Workflow.wf_uuid)
        .filter(Workflow.root_wf_id == self._root_wf_id)
        .filter(Workflow.wf_id != self._root_wf_id)
    )
    return q.all()
def get_schema_version(self):
    """Return the schema version reported by the schema checker.

    NOTE(review): ``self.s_check`` is not assigned anywhere in the
    visible ``__init__`` -- confirm where it is set before relying on
    this method (it would raise AttributeError otherwise).
    """
    return self.s_check.check_version()
#
# Status of initially planned wf components.
#
#
# The following block of queries are documented here:
# https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary
# and
# https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file
#
def _dax_or_dag_cond(self, JobO=Job):
    """Criterion matching sub-workflow jobs (type_desc 'dax' or 'dag')."""
    is_dax = JobO.type_desc == "dax"
    is_dag = JobO.type_desc == "dag"
    return or_(is_dax, is_dag)
def _get_job_filter(self, JobO=Job):
    """Translate the current job-filter mode into a SQLAlchemy criterion.

    'all' means no filtering (None); 'nonsub'/'subwf' select against the
    dax/dag condition; every other mode (as validated by
    set_job_filter) names a type_desc value verbatim.
    """
    mode = self._job_filter_mode
    if mode == "all":
        return None
    if mode == "nonsub":
        return not_(self._dax_or_dag_cond(JobO))
    if mode == "subwf":
        return self._dax_or_dag_cond(JobO)
    # Remaining modes match their type_desc string directly.
    return JobO.type_desc == mode
def _max_job_seq_subquery(self):
    """
    Creates the following subquery that is used in
    several queries:
    and jb_inst.job_submit_seq = (
        select max(job_submit_seq) from job_instance where job_id = jb_inst.job_id group by job_id
    )

    Correlated against the outer JobInstance via an alias, so it yields
    the latest submission attempt per job.
    """
    # Alias so the inner job_instance reference is distinct from the
    # outer one being correlated against.
    JobInstanceSubMax = orm.aliased(JobInstance)
    sub_q = self.session.query(
        func.max(JobInstanceSubMax.job_submit_seq).label("max_id")
    )
    sub_q = sub_q.filter(JobInstanceSubMax.job_id == JobInstance.job_id).correlate(
        JobInstance
    )
    sub_q = sub_q.group_by(JobInstanceSubMax.job_id).subquery()
    return sub_q
def get_total_jobs_status(self):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaljobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaljobs

    Count of jobs in scope, honoring the job filter.
    """
    q = self.session.query(Job.job_id)
    # Scope: whole workflow tree (root), the resolved descendant set,
    # or the single workflow, depending on mode.
    if self._expand and self._is_root_wf:
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        q = q.filter(Workflow.wf_id.in_(self._wfs))
    else:
        q = q.filter(Workflow.wf_id == self._wfs[0])
    q = q.filter(Job.wf_id == Workflow.wf_id)
    if self._get_job_filter() is not None:
        q = q.filter(self._get_job_filter())
    return q.count()
def get_total_succeeded_failed_jobs_status(self, classify_error=False, tag=None):
    """
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceeded_failed_jobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededfailedjobs

    Return a single (succeeded, failed) row, judging each job by the
    exitcode of its last submission attempt.

    :param classify_error: when True, only count jobs whose last
        instance carries the given tag with a positive count.
    :param tag: tag name required when classify_error is True; without
        it an error is logged and None is returned.
    """
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    # sq_1: latest job_submit_seq per job in scope (honors job filter).
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()

    # Tally exitcodes of those last attempts into two sums.
    q = self.session.query(
        func.sum(case([(JobInstance.exitcode == 0, 1)], else_=0)).label(
            "succeeded"
        ),
        func.sum(case([(JobInstance.exitcode != 0, 1)], else_=0)).label("failed"),
    )
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)

    if classify_error:
        if tag is None:
            self.log.error("for error classification you need to specify tag")
            return None
        q = q.filter(JobInstance.job_instance_id == Tag.job_instance_id)
        q = q.filter(Tag.name == tag)
        q = q.filter(Tag.count > 0)

    return q.one()
def get_total_held_jobs(self):
    """
    Return one row per currently held job: the distinct id of the job's
    latest job instance, its job id, job name and the hold reason.

    Equivalent SQL:
    SELECT DISTINCT count( job_instance_id) FROM
    jobstate j JOIN ( SELECT max(job_instance_id) as maxid FROM job_instance GROUP BY job_id) max_ji ON j.job_instance_id=max_ji.maxid
    WHERE j.state = 'JOB_HELD';
    """
    # Subquery: the highest (latest) job_instance_id per job, plus the
    # job's id and name, scoped to the selected workflow(s).
    sq_1 = self.session.query(
        func.max(JobInstance.job_instance_id).label("max_ji_id"),
        JobInstance.job_id.label("jobid"),
        Job.exec_job_id.label("jobname"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id)
    sq_1 = sq_1.group_by(JobInstance.job_id).subquery()
    # Join JOB_HELD jobstate rows against those latest instances.
    q = self.session.query(
        distinct(Jobstate.job_instance_id.label("last_job_instance")),
        sq_1.c.jobid,
        sq_1.c.jobname,
        Jobstate.reason,
    )
    q = q.filter(Jobstate.state == "JOB_HELD")
    q = q.join(sq_1, Jobstate.job_instance_id == sq_1.c.max_ji_id)
    return q.all()
def get_total_succeeded_jobs_status(self):
    """
    Count jobs whose most recent job instance finished with exit code 0.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceededjobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededjobs
    """
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    # Latest retry (max job_submit_seq) per job in scope.
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()
    q = self.session.query(JobInstance.job_instance_id.label("last_job_instance"))
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)
    # The explicit NULL guard excludes still-running instances.
    q = q.filter(JobInstance.exitcode == 0).filter(
        JobInstance.exitcode != None
    )  # noqa: E711
    return q.count()
def _get_total_failed_jobs_status(self):
    """
    Build (but do not execute) the query selecting the last job instance
    of every job in scope that finished with a non-zero exit code.
    Shared by the failed-jobs count and the failed-instance listing.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalfailedjobs
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalfailedjobs
    """
    JobInstanceSub = orm.aliased(JobInstance, name="JobInstanceSub")
    # Latest retry (max job_submit_seq) per job in scope.
    sq_1 = self.session.query(
        func.max(JobInstanceSub.job_submit_seq).label("jss"),
        JobInstanceSub.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Workflow.wf_id == Job.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstanceSub.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.group_by(JobInstanceSub.job_id).subquery()
    q = self.session.query(JobInstance.job_instance_id.label("last_job_instance"))
    q = q.filter(JobInstance.job_id == sq_1.c.jobid)
    q = q.filter(JobInstance.job_submit_seq == sq_1.c.jss)
    # Non-zero AND non-NULL: a NULL exitcode means still running, not failed.
    q = q.filter(JobInstance.exitcode != 0).filter(
        JobInstance.exitcode != None
    )  # noqa: E711
    return q
def get_total_running_jobs_status(self):
    """Count jobs whose most recent job instance has no exit code yet."""
    ji_alias = orm.aliased(JobInstance, name="JobInstanceSub")
    # Per job in scope, find its latest retry (highest job_submit_seq).
    latest = self.session.query(
        func.max(ji_alias.job_submit_seq).label("jss"),
        ji_alias.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        latest = latest.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        latest = latest.filter(Workflow.wf_id.in_(self._wfs))
    else:
        latest = latest.filter(Workflow.wf_id == self._wfs[0])
    latest = latest.filter(Workflow.wf_id == Job.wf_id).filter(
        Job.job_id == ji_alias.job_id
    )
    if self._get_job_filter() is not None:
        latest = latest.filter(self._get_job_filter())
    latest = latest.group_by(ji_alias.job_id).subquery()
    # A NULL exitcode on the latest instance means the job is still running.
    running = (
        self.session.query(JobInstance.job_instance_id.label("last_job_instance"))
        .filter(JobInstance.job_id == latest.c.jobid)
        .filter(JobInstance.job_submit_seq == latest.c.jss)
        .filter(JobInstance.exitcode == None)  # noqa: E711
    )
    return running.count()
def get_total_failed_jobs_status(self):
    """Count jobs whose most recent job instance ended with a non-zero exit code."""
    return self._get_total_failed_jobs_status().count()
def _query_jobstate_for_instance(self, states):
    """
    Build a correlated subquery selecting jobstate rows of the current
    JobInstance whose state is one of *states* (a list of strings).
    """
    subq = (
        self.session.query(Jobstate.job_instance_id)
        .filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
        .correlate(JobInstance)
        .filter(Jobstate.state.in_(states))
        .subquery()
    )
    return subq
def get_total_jobs_retries(self):
    """
    Total number of job retries: all job-instance rows in scope minus the
    number of distinct jobs that have at least one instance.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-TotalJobRetries
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-TotalJobRetries
    """
    # NOTE(review): the return value of this call is discarded — looks
    # like a leftover.  Confirm _dax_or_dag_cond() has no side effects
    # before removing it.
    self._dax_or_dag_cond()
    # sq_1: count of every job-instance row in scope (jobs joined to
    # their instances, so one row per retry).
    sq_1 = self.session.query(func.count(Job.job_id))
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Job.wf_id == Workflow.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.subquery()
    # sq_2: count of distinct jobs that have at least one instance.
    sq_2 = self.session.query(func.count(distinct(JobInstance.job_id)))
    if self._expand and self._is_root_wf:
        sq_2 = sq_2.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_2 = sq_2.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_2 = sq_2.filter(Workflow.wf_id == self._wfs[0])
    sq_2 = sq_2.filter(Job.wf_id == Workflow.wf_id)
    sq_2 = sq_2.filter(Job.job_id == JobInstance.job_id)
    if self._get_job_filter() is not None:
        sq_2 = sq_2.filter(self._get_job_filter())
    sq_2 = sq_2.subquery()
    # Retries = total instances - distinct jobs.
    q = self.session.query(
        (sq_1.as_scalar() - sq_2.as_scalar()).label("total_job_retries")
    )
    return q.all()[0].total_job_retries
def get_total_tasks_status(self):
    """
    Count every task belonging to the workflow scope (after the
    configured job filter, if any).

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaltask
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaltasks
    """
    query = self.session.query(Task.task_id)
    if self._expand and self._is_root_wf:
        query = query.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        query = query.filter(Workflow.wf_id.in_(self._wfs))
    else:
        query = query.filter(Workflow.wf_id == self._wfs[0])
    query = query.filter(Task.wf_id == Workflow.wf_id).filter(
        Task.job_id == Job.job_id
    )
    if self._get_job_filter(Task) is not None:
        query = query.filter(self._get_job_filter(Task))
    return query.count()
def _base_task_status_query_old(self):
    """
    Obsolete builder for the per-task success/failure query; superseded
    by _base_task_statistics_query and kept only as an optimization
    reference.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceededtasks
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededtasks
    """
    # This query generation method is obsolete and is only being
    # kept for optimization reference.
    WorkflowSub1 = orm.aliased(Workflow, name="WorkflowSub1")
    JobInstanceSub1 = orm.aliased(JobInstance, name="JobInstanceSub1")
    JobSub1 = orm.aliased(Job, name="JobSub1")
    # sq_1: latest job_submit_seq per job, plus the owning workflow id.
    sq_1 = self.session.query(
        WorkflowSub1.wf_id.label("wid"),
        func.max(JobInstanceSub1.job_submit_seq).label("jss"),
        JobInstanceSub1.job_id.label("jobid"),
    )
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(WorkflowSub1.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(WorkflowSub1.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(WorkflowSub1.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(WorkflowSub1.wf_id == JobSub1.wf_id)
    sq_1 = sq_1.filter(JobSub1.job_id == JobInstanceSub1.job_id)
    sq_1 = sq_1.group_by(JobInstanceSub1.job_id)
    if self._get_job_filter(JobSub1) is not None:
        sq_1 = sq_1.filter(self._get_job_filter(JobSub1))
    sq_1 = sq_1.subquery()
    JobInstanceSub2 = orm.aliased(JobInstance, name="JobInstanceSub2")
    # sq_2: resolve each job's latest retry to its job_instance_id.
    sq_2 = self.session.query(
        sq_1.c.wid.label("wf_id"),
        JobInstanceSub2.job_instance_id.label("last_job_instance_id"),
    )
    sq_2 = sq_2.filter(JobInstanceSub2.job_id == sq_1.c.jobid)
    sq_2 = sq_2.filter(JobInstanceSub2.job_submit_seq == sq_1.c.jss)
    sq_2 = sq_2.subquery()
    # Outer query: invocations carrying an abstract task id that belong
    # to each job's last instance.
    q = self.session.query(Invocation.invocation_id)
    q = q.filter(Invocation.abs_task_id != None)  # noqa: E711
    q = q.filter(Invocation.job_instance_id == sq_2.c.last_job_instance_id)
    q = q.filter(Invocation.wf_id == sq_2.c.wf_id)
    # Calling wrapper methods would invoke like so:
    # q = self._base_task_status_query()
    # q = q.filter(Invocation.exitcode == 0)
    # return q.count()
    return q
def _base_task_statistics_query(self, success=True, pmc=False):
    """
    Build a query yielding (wf_id, count) rows: the number of distinct
    abstract task ids per workflow whose invocation succeeded
    (exitcode == 0) or failed (exitcode != 0).

    :param success: True counts successful invocations, False failed ones
    :param pmc: pegasus-mpi-cluster mode — when True every job instance
        is considered, not just each job's latest retry
    """
    w = orm.aliased(Workflow, name="w")
    j = orm.aliased(Job, name="j")
    ji = orm.aliased(JobInstance, name="ji")
    # sq_1: job instances in scope; the non-pmc variant also carries
    # maxjss so the outer query can keep only each job's latest retry.
    sq_1 = self.session.query(
        w.wf_id,
        j.job_id,
        ji.job_instance_id.label("jiid"),
        ji.job_submit_seq.label("jss"),
        func.max(ji.job_submit_seq).label("maxjss"),
    )
    if pmc:
        # pmc: rebuild without the max() column — all retries count.
        sq_1 = self.session.query(
            w.wf_id,
            j.job_id,
            ji.job_instance_id.label("jiid"),
            ji.job_submit_seq.label("jss"),
        )
    sq_1 = sq_1.join(j, w.wf_id == j.wf_id)
    sq_1 = sq_1.join(ji, j.job_id == ji.job_id)
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(w.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(w.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(w.wf_id == self._wfs[0])
    if not pmc:
        sq_1 = sq_1.group_by(j.job_id)
    if self._get_job_filter(j) is not None:
        sq_1 = sq_1.filter(self._get_job_filter(j))
    sq_1 = sq_1.subquery("t")
    # PM-713 - Change to func.count(distinct(Invocation.abs_task_id)) from func.count(Invocation.exitcode)
    sq_2 = self.session.query(
        sq_1.c.wf_id, func.count(distinct(Invocation.abs_task_id)).label("count")
    )
    sq_2 = sq_2.select_from(
        orm.join(sq_1, Invocation, sq_1.c.jiid == Invocation.job_instance_id)
    )
    if not pmc:
        # Keep only each job's latest retry.
        sq_2 = sq_2.filter(sq_1.c.jss == sq_1.c.maxjss)
    sq_2 = sq_2.filter(Invocation.abs_task_id != None)  # noqa: E711
    if success:
        sq_2 = sq_2.filter(Invocation.exitcode == 0)
    else:
        sq_2 = sq_2.filter(Invocation.exitcode != 0)
    sq_2 = sq_2.group_by(sq_1.c.wf_id)
    return sq_2
def _task_statistics_query_sum(self, success=True, pmc=False):
    """Sum the per-workflow task counts into one total (0 when empty)."""
    per_wf = self._base_task_statistics_query(success, pmc).subquery("tt")
    total = self.session.query(func.sum(per_wf.c.count).label("task_count")).one()[0]
    return total or 0
def get_total_succeeded_tasks_status(self, pmc=False):
    """Total number of tasks whose invocation succeeded (exitcode == 0)."""
    return self._task_statistics_query_sum(success=True, pmc=pmc)
def get_total_failed_tasks_status(self):
    """Total number of tasks whose invocation failed (exitcode != 0)."""
    return self._task_statistics_query_sum(success=False, pmc=False)
def get_task_success_report(self, pmc=False):
    """Per-workflow (wf_id, count) rows of successful tasks."""
    return self._base_task_statistics_query(success=True, pmc=pmc).all()
def get_task_failure_report(self):
    """Per-workflow (wf_id, count) rows of failed tasks."""
    return self._base_task_statistics_query(success=False, pmc=False).all()
def get_total_tasks_retries(self):
    """
    Total number of task retries: invocation rows beyond the first
    occurrence of each distinct (workflow id, abstract task id) pair.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaltaskretries
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaltaskretries
    """
    # One row per invocation that carries an abstract task id, keyed by
    # (workflow id, abs task id) so retries show up as duplicates.
    sq_1 = self.session.query(
        Workflow.wf_id.label("wid"), Invocation.abs_task_id.label("tid")
    )
    # Scope: root workflow / explicit workflow set / single workflow.
    if self._expand and self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.root_wf_id == self._root_wf_id)
    elif self._expand and not self._is_root_wf:
        sq_1 = sq_1.filter(Workflow.wf_id.in_(self._wfs))
    else:
        sq_1 = sq_1.filter(Workflow.wf_id == self._wfs[0])
    sq_1 = sq_1.filter(Job.wf_id == Workflow.wf_id)
    sq_1 = sq_1.filter(Invocation.wf_id == Workflow.wf_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id)
    if self._get_job_filter() is not None:
        sq_1 = sq_1.filter(self._get_job_filter())
    sq_1 = sq_1.filter(JobInstance.job_instance_id == Invocation.job_instance_id)
    sq_1 = sq_1.filter(Invocation.abs_task_id != None)  # noqa: E711
    # Retries = total rows - distinct pairs.  Replaces a manual
    # count-and-dedup loop with the equivalent set-based computation
    # (result rows hash/compare like tuples, as the old dict relied on).
    rows = sq_1.all()
    return len(rows) - len(set(rows))
#
# Run statistics
#
def get_workflow_states(self):
    """
    All recorded state transitions of the root workflow, ordered by
    restart count (one row per recorded transition per retry).

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Workflowwalltime
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowwalltime
    """
    states = (
        self.session.query(
            Workflowstate.wf_id,
            Workflowstate.state,
            Workflowstate.timestamp,
            Workflowstate.restart_count,
            Workflowstate.status,
        )
        .filter(Workflowstate.wf_id == self._root_wf_id)
        .order_by(Workflowstate.restart_count)
    )
    return states.all()
def get_workflow_cum_job_wall_time(self):
    """
    Cumulative remote job wall time: one row of (total, goodput, badput),
    each invocation's remote_duration scaled by its job instance's
    multiplier factor.  Goodput sums exitcode == 0 invocations, badput
    exitcode > 0.

    select sum(remote_duration * multiplier_factor) FROM
    invocation as invoc, job_instance as ji WHERE
    invoc.task_submit_seq >= 0 and
    invoc.job_instance_id = ji.job_instance_id and
    invoc.wf_id in (1,2,3) and
    invoc.transformation <> 'condor::dagman'
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Workflowcumulativejobwalltime
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowcumulativejobwalltime
    """
    q = self.session.query(
        cast(
            func.sum(Invocation.remote_duration * JobInstance.multiplier_factor),
            Float,
        ),
        cast(
            func.sum(
                case(
                    [
                        (
                            Invocation.exitcode == 0,
                            Invocation.remote_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("goodput"),
            Float,
        ),
        cast(
            func.sum(
                case(
                    [
                        (
                            Invocation.exitcode > 0,
                            Invocation.remote_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("badput"),
            Float,
        ),
    )
    # task_submit_seq >= 0 excludes pre-script pseudo-invocations.
    q = q.filter(Invocation.task_submit_seq >= 0)
    q = q.filter(Invocation.job_instance_id == JobInstance.job_instance_id)
    if self._expand:
        q = q.filter(Invocation.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(Invocation.wf_id.in_(self._wfs))
    # dagman's own invocation rows are bookkeeping, not payload work.
    q = q.filter(Invocation.transformation != "condor::dagman")
    return q.first()
def get_summary_integrity_metrics(self):
    """
    Integrity metrics aggregated by type (check | compute): summed
    duration and count.  Returns at most two rows, one per type.
    """
    query = self.session.query(
        IntegrityMetrics.type,
        func.sum(IntegrityMetrics.duration).label("duration"),
        func.sum(IntegrityMetrics.count).label("count"),
    ).group_by(IntegrityMetrics.type)
    if self._expand:
        query = query.filter(IntegrityMetrics.wf_id == Workflow.wf_id).filter(
            Workflow.root_wf_id == self._root_wf_id
        )
    else:
        query = query.filter(IntegrityMetrics.wf_id.in_(self._wfs))
    return query.all()
def get_tag_metrics(self, name):
    """
    Aggregate tag counts for a single tag name across the scope.

    :param name: tag name to aggregate on
    :return: rows of (name, summed count)
    """
    query = (
        self.session.query(Tag.name, func.sum(Tag.count).label("count"))
        .group_by(Tag.name)
        .filter(Tag.name == name)
    )
    if self._expand:
        query = query.filter(Tag.wf_id == Workflow.wf_id).filter(
            Workflow.root_wf_id == self._root_wf_id
        )
    else:
        query = query.filter(Tag.wf_id.in_(self._wfs))
    return query.all()
def get_integrity_metrics(self):
    """
    Integrity metrics aggregated by (type, file_type): summed duration
    and count.  ``type`` is check | compute; ``file_type`` is input |
    output.

    :return: result rows with attributes type, file_type, duration, count
    """
    q = self.session.query(
        IntegrityMetrics.type,
        IntegrityMetrics.file_type,
        func.sum(IntegrityMetrics.duration).label("duration"),
        func.sum(IntegrityMetrics.count).label("count"),
    )
    q = q.group_by(IntegrityMetrics.type)
    q = q.group_by(IntegrityMetrics.file_type)
    if self._expand:
        q = q.filter(IntegrityMetrics.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(IntegrityMetrics.wf_id.in_(self._wfs))
    # (Removed a stray Python-2 debug snippet that sat here as a no-op
    # triple-quoted expression statement.)
    return q.all()
def get_submit_side_job_wall_time(self):
    """
    Cumulative job wall time as seen from the submit side: one row of
    (wall_time, goodput, badput) built from local_duration scaled by
    the multiplier factor.  Goodput sums exitcode == 0 instances, badput
    exitcode > 0.

    select sum(local_duration * multiplier_factor) FROM
    job_instance as jb_inst, job as jb WHERE
    jb_inst.job_id = jb.job_id and
    jb.wf_id in (1,2,3) and
    ((not (jb.type_desc ='dax' or jb.type_desc ='dag'))
    or
    ((jb.type_desc ='dax' or jb.type_desc ='dag') and jb_inst.subwf_id is NULL)
    )
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Cumulativejobwalltimeasseenfromsubmitside
    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Cumulativejobwalltimeasseenfromsubmitside
    """
    q = self.session.query(
        cast(
            func.sum(JobInstance.local_duration * JobInstance.multiplier_factor),
            Float,
        ).label("wall_time"),
        cast(
            func.sum(
                case(
                    [
                        (
                            JobInstance.exitcode == 0,
                            JobInstance.local_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("goodput"),
            Float,
        ),
        cast(
            func.sum(
                case(
                    [
                        (
                            JobInstance.exitcode > 0,
                            JobInstance.local_duration
                            * JobInstance.multiplier_factor,
                        )
                    ],
                    else_=0,
                )
            ).label("badput"),
            Float,
        ),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    if self._expand:
        q = q.filter(Job.wf_id == Workflow.wf_id)
        q = q.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        q = q.filter(Job.wf_id.in_(self._wfs))
    if self._expand:
        # Exclude sub-workflow (dax/dag) jobs that have their own
        # sub-workflow rows, to avoid double counting when expanding.
        d_or_d = self._dax_or_dag_cond()
        q = q.filter(
            or_(not_(d_or_d), and_(d_or_d, JobInstance.subwf_id == None))
        )  # noqa: E711
    return q.first()
def get_workflow_details(self):
    """
    One descriptive row per workflow in the current workflow set.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowdetails
    """
    columns = (
        Workflow.wf_id,
        Workflow.wf_uuid,
        Workflow.parent_wf_id,
        Workflow.root_wf_id,
        Workflow.dag_file_name,
        Workflow.submit_hostname,
        Workflow.submit_dir,
        Workflow.planner_arguments,
        Workflow.user,
        Workflow.grid_dn,
        Workflow.planner_version,
        Workflow.dax_label,
        Workflow.dax_version,
    )
    return (
        self.session.query(*columns)
        .filter(Workflow.wf_id.in_(self._wfs))
        .all()
    )
def get_workflow_retries(self):
    """
    Total workflow retries: the sum over every workflow in scope of its
    maximum recorded restart_count.

    https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowretries
    """
    # Per workflow, the highest restart_count seen in workflowstate.
    per_wf_max = self.session.query(
        func.max(Workflowstate.restart_count).label("retry")
    )
    if self._expand and self._is_root_wf:
        per_wf_max = per_wf_max.filter(Workflow.root_wf_id == self._root_wf_id)
    else:
        per_wf_max = per_wf_max.filter(Workflow.wf_id.in_(self._wfs))
    per_wf_max = (
        per_wf_max.filter(Workflowstate.wf_id == Workflow.wf_id)
        .group_by(Workflowstate.wf_id)
        .subquery()
    )
    total = self.session.query(func.sum(per_wf_max.c.retry).label("total_retry"))
    return total.one().total_retry
#
# Job Statistics
# These queries are documented:
# https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file
#
def get_job_statistics(self):
    """
    Per-job-instance statistics rows: timing breakdown (condor queue
    time, resource delay, runtime, kickstart, post time, seqexec),
    exit code, host and cpu time.  Only valid for a concrete workflow
    set — returns [] when expanding.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-All
    """
    if self._expand:
        return []
    # sq_1: earliest of GRID_SUBMIT / GLOBUS_SUBMIT / EXECUTE — when the
    # job first left the local queue.
    sq_1 = self.session.query(func.min(Jobstate.timestamp))
    sq_1 = sq_1.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_1 = sq_1.filter(
        or_(
            Jobstate.state == "GRID_SUBMIT",
            Jobstate.state == "GLOBUS_SUBMIT",
            Jobstate.state == "EXECUTE",
        )
    )
    sq_1 = sq_1.subquery()
    # sq_2: SUBMIT timestamp (condor queue entry).
    sq_2 = self.session.query(Jobstate.timestamp)
    sq_2 = sq_2.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_2 = sq_2.filter(Jobstate.state == "SUBMIT")
    sq_2 = sq_2.subquery()
    # sq_3: first EXECUTE timestamp.
    sq_3 = self.session.query(func.min(Jobstate.timestamp))
    sq_3 = sq_3.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_3 = sq_3.filter(Jobstate.state == "EXECUTE")
    sq_3 = sq_3.subquery()
    # sq_4: first grid/globus submit — with sq_3 gives resource delay.
    sq_4 = self.session.query(func.min(Jobstate.timestamp))
    sq_4 = sq_4.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_4 = sq_4.filter(
        or_(Jobstate.state == "GRID_SUBMIT", Jobstate.state == "GLOBUS_SUBMIT")
    )
    sq_4 = sq_4.subquery()
    # sq_5: summed kickstart (remote) duration of real invocations.
    sq_5 = self.session.query(func.sum(Invocation.remote_duration))
    sq_5 = sq_5.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_5 = sq_5.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_5 = sq_5.filter(Invocation.task_submit_seq >= 0)
    sq_5 = sq_5.group_by().subquery()
    # sq_6 / sq_7: post-script window (terminated minus started/job end).
    sq_6 = self.session.query(Jobstate.timestamp)
    sq_6 = sq_6.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_6 = sq_6.filter(Jobstate.state == "POST_SCRIPT_TERMINATED")
    sq_6 = sq_6.subquery()
    sq_7 = self.session.query(func.max(Jobstate.timestamp))
    sq_7 = sq_7.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_7 = sq_7.filter(
        or_(
            Jobstate.state == "POST_SCRIPT_STARTED",
            Jobstate.state == "JOB_TERMINATED",
        )
    )
    sq_7 = sq_7.subquery()
    # sq_8: worst (max) exit code across invocations.
    sq_8 = self.session.query(func.max(Invocation.exitcode))
    sq_8 = sq_8.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_8 = sq_8.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    # PM-704 the task submit sequence needs to be >= -1 to include prescript status
    sq_8 = sq_8.filter(Invocation.task_submit_seq >= -1)
    sq_8 = sq_8.group_by().subquery()
    # sq_9: hostname the instance ran on.
    JobInstanceSub = orm.aliased(JobInstance)
    sq_9 = self.session.query(Host.hostname)
    sq_9 = sq_9.filter(
        JobInstanceSub.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_9 = sq_9.filter(Host.host_id == JobInstanceSub.host_id)
    sq_9 = sq_9.subquery()
    # sq_10: kickstart duration scaled by the multiplier factor.
    JI = orm.aliased(JobInstance)
    sq_10 = self.session.query(
        func.sum(Invocation.remote_duration * JI.multiplier_factor)
    )
    sq_10 = sq_10.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_10 = sq_10.filter(Invocation.job_instance_id == JI.job_instance_id)
    sq_10 = sq_10.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_10 = sq_10.filter(Invocation.task_submit_seq >= 0)
    sq_10 = sq_10.group_by().subquery()
    # sq_11: summed remote cpu time of real invocations.
    sq_11 = self.session.query(func.sum(Invocation.remote_cpu_time))
    sq_11 = sq_11.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_11 = sq_11.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_11 = sq_11.filter(Invocation.task_submit_seq >= 0)
    sq_11 = sq_11.group_by().subquery()
    # Main query: one row per job instance with all derived columns.
    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        Job.exec_job_id.label("job_name"),
        JobInstance.site,
        cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("condor_q_time"),
        cast(sq_3.as_scalar() - sq_4.as_scalar(), Float).label("resource_delay"),
        cast(JobInstance.local_duration, Float).label("runtime"),
        cast(sq_5.as_scalar(), Float).label("kickstart"),
        cast(sq_6.as_scalar() - sq_7.as_scalar(), Float).label("post_time"),
        cast(JobInstance.cluster_duration, Float).label("seqexec"),
        sq_8.as_scalar().label("exit_code"),
        sq_9.as_scalar().label("host_name"),
        JobInstance.multiplier_factor,
        cast(sq_10.as_scalar(), Float).label("kickstart_multi"),
        sq_11.as_scalar().label("remote_cpu_time"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def _state_sub_q(self, states, function=None):
    """
    Build a correlated subquery over Jobstate timestamps for the current
    JobInstance, restricted to the given state names.

    :param states: list of jobstate name strings
    :param function: None (or any falsy value) for the raw timestamp
        column, or "max" / "min" to aggregate it
    :raises ValueError: if function is a truthy value other than
        "max" or "min"
    """
    if not function:
        sq = self.session.query(Jobstate.timestamp)
    elif function == "max":
        sq = self.session.query(func.max(Jobstate.timestamp))
    elif function == "min":
        sq = self.session.query(func.min(Jobstate.timestamp))
    else:
        # Previously this fell through with sq = None and crashed below
        # with an opaque AttributeError; fail fast with a clear message.
        raise ValueError(
            "function must be None, 'max' or 'min', got %r" % (function,)
        )
    sq = sq.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq = sq.filter(Jobstate.state.in_(states)).subquery()
    return sq
def get_job_states(self):
    """
    Per-job-instance state-timeline rows: start time and duration of the
    whole job plus each phase (pre-script, condor, grid, execute,
    kickstart, post-script) and the transformations invoked.  Returns []
    when expanding.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-JobStates
    """
    if self._expand:
        return []
    # sq_1: hostname the instance ran on.
    sq_1 = (
        self.session.query(Host.hostname)
        .filter(Host.host_id == JobInstance.host_id)
        .correlate(JobInstance)
        .subquery()
    )
    # select min(timestamp) from jobstate where job_instance_id = jb_inst.job_instance_id
    # ) as jobS ,
    # (
    # select max(timestamp)-min(timestamp) from jobstate where job_instance_id = jb_inst.job_instance_id
    # ) as jobDuration,
    sq_jobS = self.session.query(func.min(Jobstate.timestamp))
    sq_jobS = (
        sq_jobS.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
        .correlate(JobInstance)
        .subquery()
    )
    sq_jobD = self.session.query(
        func.max(Jobstate.timestamp) - func.min(Jobstate.timestamp)
    )
    sq_jobD = (
        sq_jobD.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
        .correlate(JobInstance)
        .subquery()
    )
    # Phase boundary timestamps; pairs below are subtracted for durations.
    sq_2 = self._state_sub_q(["PRE_SCRIPT_STARTED"])
    sq_3 = self._state_sub_q(["PRE_SCRIPT_TERMINATED"])
    sq_4 = self._state_sub_q(["PRE_SCRIPT_STARTED"])
    sq_5 = self._state_sub_q(["SUBMIT"])
    sq_6 = self._state_sub_q(["JOB_TERMINATED"])
    sq_7 = self._state_sub_q(["GRID_SUBMIT", "GLOBUS_SUBMIT"], "max")
    sq_8 = self._state_sub_q(["EXECUTE"], "min")
    sq_9 = self._state_sub_q(["EXECUTE", "SUBMIT"], "max")
    sq_10 = self._state_sub_q(["JOB_TERMINATED"])
    # sq_11 / sq_12: kickstart start and summed duration from invocations.
    sq_11 = self.session.query(func.min(Invocation.start_time))
    sq_11 = sq_11.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_11 = sq_11.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_11 = sq_11.filter(Invocation.task_submit_seq >= 0)
    sq_11 = sq_11.group_by(Invocation.job_instance_id).subquery()
    sq_12 = self.session.query(func.sum(Invocation.remote_duration))
    sq_12 = sq_12.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_12 = sq_12.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
    sq_12 = sq_12.filter(Invocation.task_submit_seq >= 0)
    sq_12 = sq_12.group_by(Invocation.job_instance_id).subquery()
    sq_13 = self._state_sub_q(["POST_SCRIPT_STARTED", "JOB_TERMINATED"], "max")
    sq_14 = self._state_sub_q(["POST_SCRIPT_TERMINATED"])
    # sq_15: comma-joined distinct transformations, excluding the
    # dagman pre/post helper invocations.
    sq_15 = self.session.query(
        func.group_concat(func.distinct(Invocation.transformation))
    )
    sq_15 = sq_15.filter(Invocation.wf_id.in_(self._wfs))
    sq_15 = sq_15.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_15 = sq_15.filter(Invocation.transformation != "dagman::post")
    sq_15 = sq_15.filter(Invocation.transformation != "dagman::pre")
    sq_15 = sq_15.subquery()
    q = self.session.query(
        Job.job_id,
        JobInstance.job_instance_id,
        JobInstance.job_submit_seq,
        Job.exec_job_id.label("job_name"),
        JobInstance.site,
        sq_1.as_scalar().label("host_name"),
        cast(sq_jobS.as_scalar(), Float).label("jobS"),
        cast(sq_jobD.as_scalar(), Float).label("jobDuration"),
        cast(sq_2.as_scalar(), Float).label("pre_start"),
        cast(sq_3.as_scalar() - sq_4.as_scalar(), Float).label("pre_duration"),
        cast(sq_5.as_scalar(), Float).label("condor_start"),
        cast(sq_6.as_scalar() - sq_5.as_scalar(), Float).label("condor_duration"),
        cast(sq_7.as_scalar(), Float).label("grid_start"),
        cast(sq_8.as_scalar() - sq_7.as_scalar(), Float).label("grid_duration"),
        cast(sq_9.as_scalar(), Float).label("exec_start"),
        cast(sq_10.as_scalar() - sq_9.as_scalar(), Float).label("exec_duration"),
        cast(sq_11.as_scalar(), Float).label("kickstart_start"),
        cast(sq_12.as_scalar(), Float).label("kickstart_duration"),
        cast(sq_13.as_scalar(), Float).label("post_start"),
        cast(sq_14.as_scalar() - sq_13.as_scalar(), Float).label("post_duration"),
        sq_15.as_scalar().label("transformation"),
    )
    q = q.filter(JobInstance.job_id == Job.job_id)
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_job_instance_sub_wf_map(self):
    """
    Map dax/dag job instances to their sub-workflow ids.  Returns []
    when expanding.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Subworkflowjobinstancesmapping
    """
    if self._expand:
        return []
    mapping = (
        self.session.query(JobInstance.job_instance_id, JobInstance.subwf_id)
        .filter(Job.wf_id.in_(self._wfs))
        .filter(Job.job_id == JobInstance.job_id)
        .filter(self._dax_or_dag_cond())
    )
    return mapping.all()
def get_failed_job_instances(self):
    """
    All job instances that are the final retry of their job and finished
    with a non-zero exit code.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Failedjobinstances
    """
    # PM-752: reuse the exact query behind the failed-jobs count.
    return self._get_total_failed_jobs_status().all()
def get_plots_failed_job_instances(self, final=False, all_jobs=False):
    """
    Failed job instances for pegasus-plots.  Deprecated.  Returns []
    when expanding.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Failedjobinstances
    used in the pegasus plots code. is deprecated

    :param final: when True, keep only each job's last retry
        (max job_submit_seq grouped by job)
    :param all_jobs: when True, include sub-workflow (dax/dag) jobs that
        have their own sub-workflow rows
    """
    if self._expand:
        return []
    d_or_d = self._dax_or_dag_cond()
    if not final:
        q = self.session.query(
            JobInstance.job_instance_id, JobInstance.job_submit_seq
        )
    else:
        q = self.session.query(
            JobInstance.job_instance_id, func.max(JobInstance.job_submit_seq)
        )
    q = q.filter(Job.wf_id.in_(self._wfs))
    q = q.filter(Job.job_id == JobInstance.job_id)
    if not all_jobs:
        # Skip dax/dag jobs that delegate to a sub-workflow.
        q = q.filter(
            or_(not_(d_or_d), and_(d_or_d, JobInstance.subwf_id == None))
        )  # noqa: E711
    # Failed means a non-zero, non-NULL exit code.
    q = q.filter(JobInstance.exitcode != 0).filter(
        JobInstance.exitcode != None
    )  # noqa: E711
    if final:
        q = q.group_by(JobInstance.job_id)
    q = q.order_by(JobInstance.job_submit_seq)
    return q.all()
def get_job_instance_info(self, job_instance_id=None):
    """
    Detailed information rows for job instances: paths, stdio, current
    state, pre-script data and host.  Pulls all instances, or just one
    when ``job_instance_id`` is given.  Returns [] when expanding.

    https://confluence.pegasus.isi.edu/pages/viewpage.action?pageId=14876831
    """
    if self._expand:
        return []
    # sq_0: submit dir of the instance's sub-workflow (if any).
    sq_0 = self.session.query(Workflow.submit_dir)
    sq_0 = sq_0.filter(Workflow.wf_id == JobInstance.subwf_id).correlate(
        JobInstance
    )
    sq_0 = sq_0.subquery()
    # sq_1..sq_4: the owning job's name, submit file, executable, argv.
    sq_1 = self.session.query(Job.exec_job_id)
    sq_1 = sq_1.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_1 = sq_1.subquery()
    sq_2 = self.session.query(Job.submit_file)
    sq_2 = sq_2.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_2 = sq_2.subquery()
    sq_3 = self.session.query(Job.executable)
    sq_3 = sq_3.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_3 = sq_3.subquery()
    sq_4 = self.session.query(Job.argv)
    sq_4 = sq_4.filter(Job.job_id == JobInstance.job_id).correlate(JobInstance)
    sq_4 = sq_4.subquery()
    # sq_5: submit dir of the root workflow.
    sq_5 = self.session.query(Workflow.submit_dir)
    sq_5 = sq_5.filter(Workflow.wf_id == self._root_wf_id).subquery()
    # sq_6 / sq_7: the instance's most recent recorded state.
    sq_6 = self.session.query(
        func.max(Jobstate.jobstate_submit_seq).label("max_job_submit_seq")
    )
    sq_6 = sq_6.filter(Jobstate.job_instance_id == job_instance_id)
    sq_6 = sq_6.subquery()
    sq_7 = self.session.query(Jobstate.state)
    sq_7 = sq_7.filter(
        Jobstate.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_7 = sq_7.filter(Jobstate.jobstate_submit_seq == sq_6.as_scalar())
    sq_7 = sq_7.subquery()
    # sq_8 / sq_9: pre-script executable and argv — the invocation row
    # with task_submit_seq == -1 is the pre-script.
    sq_8 = self.session.query(Invocation.executable)
    sq_8 = sq_8.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_8 = sq_8.filter(Invocation.task_submit_seq == -1)
    sq_8 = sq_8.subquery()
    sq_9 = self.session.query(Invocation.argv)
    sq_9 = sq_9.filter(
        Invocation.job_instance_id == JobInstance.job_instance_id
    ).correlate(JobInstance)
    sq_9 = sq_9.filter(Invocation.task_submit_seq == -1)
    sq_9 = sq_9.subquery()
    # sq_10: hostname the instance ran on.
    sq_10 = self.session.query(Host.hostname)
    sq_10 = sq_10.filter(Host.host_id == JobInstance.host_id).correlate(JobInstance)
    sq_10 = sq_10.subquery()
    q = self.session.query(
        JobInstance.job_instance_id,
        JobInstance.site,
        JobInstance.stdout_file,
        JobInstance.stderr_file,
        JobInstance.stdout_text,
        JobInstance.stderr_text,
        JobInstance.work_dir,
        sq_0.as_scalar().label("subwf_dir"),
        sq_1.as_scalar().label("job_name"),
        sq_2.as_scalar().label("submit_file"),
        sq_3.as_scalar().label("executable"),
        sq_4.as_scalar().label("argv"),
        sq_5.as_scalar().label("submit_dir"),
        sq_7.as_scalar().label("state"),
        sq_8.as_scalar().label("pre_executable"),
        sq_9.as_scalar().label("pre_argv"),
        sq_10.as_scalar().label("hostname"),
    )
    if job_instance_id:
        q = q.filter(JobInstance.job_instance_id == job_instance_id)
    return q.all()
def get_invocation_info(self, ji_id=None):
    """
    Invocation rows of a single job instance within the workflow set.
    Returns [] when expanding or when no job-instance id is given.

    Equivalent SQL:
    SELECT task_submit_seq, exitcode, executable, argv, transformation, abs_task_id
    FROM invocation WHERE job_instance_id = 7 and wf_id = 1
    """
    if self._expand or not ji_id:
        return []
    invocations = (
        self.session.query(
            Invocation.task_submit_seq,
            Invocation.exitcode,
            Invocation.executable,
            Invocation.argv,
            Invocation.transformation,
            Invocation.abs_task_id,
        )
        .filter(Invocation.job_instance_id == ji_id)
        .filter(Invocation.wf_id.in_(self._wfs))
    )
    return invocations.all()
def get_job_name(self):
    """
    One row per job instance carrying the job's name, ordered by
    job_submit_seq.  Returns [] when expanding.

    https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Name
    """
    if self._expand:
        return []
    names = (
        self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            Job.exec_job_id.label("job_name"),
        )
        .filter(Job.job_id == JobInstance.job_id)
        .filter(Job.wf_id.in_(self._wfs))
        .order_by(JobInstance.job_submit_seq)
    )
    return names.all()
def get_job_site(self):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Site
"""
if self._expand:
return []
q = self.session.query(
Job.job_id,
JobInstance.job_instance_id,
JobInstance.job_submit_seq,
JobInstance.site,
)
q = q.filter(Job.wf_id.in_(self._wfs))
q = q.filter(Job.job_id == JobInstance.job_id).group_by(Job.job_id)
q = q.order_by(JobInstance.job_submit_seq)
return q.all()
    def get_job_kickstart(self):
        """
        Per job instance, the sum of remote task durations scaled by the
        instance's multiplier factor ("kickstart" time).

        https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Kickstart
        """
        # Not available in expanded (multi-workflow) mode.
        if self._expand:
            return []
        # Correlated scalar subquery: weighted remote duration of all real
        # tasks belonging to the outer JobInstance/Job row.
        sq_1 = self.session.query(
            func.sum(Invocation.remote_duration * JobInstance.multiplier_factor)
        )
        sq_1 = sq_1.filter(
            Invocation.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_1 = sq_1.filter(Invocation.wf_id == Job.wf_id).correlate(Job)
        # Only real tasks; negative task_submit_seq rows are used for
        # pre-script records elsewhere in this class.
        sq_1 = sq_1.filter(Invocation.task_submit_seq >= 0)
        sq_1 = sq_1.group_by(Invocation.job_instance_id)
        sq_1 = sq_1.subquery()
        q = self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            cast(sq_1.as_scalar(), Float).label("kickstart"),
        )
        q = q.filter(JobInstance.job_id == Job.job_id)
        q = q.filter(Job.wf_id.in_(self._wfs))
        q = q.order_by(JobInstance.job_submit_seq)
        return q.all()
def get_job_runtime(self):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Runtime
"""
if self._expand:
return []
q = self.session.query(
Job.job_id,
JobInstance.job_instance_id,
JobInstance.job_submit_seq,
JobInstance.local_duration.label("runtime"),
)
q = q.filter(Job.job_id == JobInstance.job_id)
q = q.filter(Job.wf_id.in_(self._wfs))
q = q.group_by(Job.job_id).order_by(JobInstance.job_submit_seq)
return q.all()
def get_job_seqexec(self):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Seqexec
"""
if self._expand:
return []
q = self.session.query(
Job.job_id,
JobInstance.job_instance_id,
JobInstance.job_submit_seq,
JobInstance.cluster_duration,
)
q = q.filter(Job.job_id == JobInstance.job_id)
q = q.filter(Job.wf_id.in_(self._wfs))
q = q.filter(Job.clustered != 0)
q = q.order_by(JobInstance.job_submit_seq)
return q.all()
    def get_condor_q_time(self):
        """
        Per job instance, the time spent in the Condor queue: the earliest
        GRID_SUBMIT/GLOBUS_SUBMIT/EXECUTE timestamp minus the SUBMIT timestamp.

        https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-CondorQTime
        """
        if self._expand:
            return []
        # Correlated subquery: first timestamp at which the job left the
        # local queue (grid submit or direct execute).
        sq_1 = self.session.query(func.min(Jobstate.timestamp))
        sq_1 = sq_1.filter(
            Jobstate.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_1 = sq_1.filter(
            or_(
                Jobstate.state == "GRID_SUBMIT",
                Jobstate.state == "GLOBUS_SUBMIT",
                Jobstate.state == "EXECUTE",
            )
        )
        sq_1 = sq_1.subquery()
        # Correlated subquery: the SUBMIT timestamp of the same instance.
        sq_2 = self.session.query(Jobstate.timestamp)
        sq_2 = sq_2.filter(
            Jobstate.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_2 = sq_2.filter(Jobstate.state == "SUBMIT")
        sq_2 = sq_2.subquery()
        q = self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("condor_q_time"),
        )
        q = q.filter(JobInstance.job_id == Job.job_id)
        q = q.filter(Job.wf_id.in_(self._wfs))
        q = q.order_by(JobInstance.job_submit_seq)
        return q.all()
    def get_resource_delay(self):
        """
        Per job instance, the delay on the remote resource: the earliest
        EXECUTE timestamp minus the GRID_SUBMIT/GLOBUS_SUBMIT timestamp.

        https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Resource
        """
        if self._expand:
            return []
        # Correlated subquery: first EXECUTE timestamp of the instance.
        sq_1 = self.session.query(func.min(Jobstate.timestamp))
        sq_1 = sq_1.filter(
            Jobstate.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_1 = sq_1.filter(Jobstate.state == "EXECUTE")
        sq_1 = sq_1.subquery()
        # Correlated subquery: the grid submission timestamp of the instance.
        sq_2 = self.session.query(Jobstate.timestamp)
        sq_2 = sq_2.filter(
            Jobstate.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_2 = sq_2.filter(
            or_(Jobstate.state == "GRID_SUBMIT", Jobstate.state == "GLOBUS_SUBMIT")
        )
        sq_2 = sq_2.subquery()
        q = self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("resource_delay"),
        )
        q = q.filter(JobInstance.job_id == Job.job_id)
        q = q.filter(Job.wf_id.in_(self._wfs))
        q = q.order_by(JobInstance.job_submit_seq)
        return q.all()
    def get_post_time(self):
        """
        Per job instance, the post-script time: POST_SCRIPT_TERMINATED minus
        the latest of POST_SCRIPT_STARTED / JOB_TERMINATED.

        https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Post
        """
        if self._expand:
            return []
        # Correlated subquery: when the post script finished.
        sq_1 = self.session.query(Jobstate.timestamp)
        sq_1 = sq_1.filter(
            Jobstate.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_1 = sq_1.filter(Jobstate.state == "POST_SCRIPT_TERMINATED")
        sq_1 = sq_1.subquery()
        # Correlated subquery: when the post script effectively began
        # (the later of script start and job termination).
        sq_2 = self.session.query(func.max(Jobstate.timestamp))
        sq_2 = sq_2.filter(
            Jobstate.job_instance_id == JobInstance.job_instance_id
        ).correlate(JobInstance)
        sq_2 = sq_2.filter(
            or_(
                Jobstate.state == "POST_SCRIPT_STARTED",
                Jobstate.state == "JOB_TERMINATED",
            )
        )
        sq_2 = sq_2.subquery()
        q = self.session.query(
            Job.job_id,
            JobInstance.job_instance_id,
            JobInstance.job_submit_seq,
            cast(sq_1.as_scalar() - sq_2.as_scalar(), Float).label("post_time"),
        )
        q = q.filter(JobInstance.job_id == Job.job_id)
        q = q.filter(Job.wf_id.in_(self._wfs))
        q = q.order_by(JobInstance.job_submit_seq)
        return q.all()
#
# This query documented:
# https://confluence.pegasus.isi.edu/display/pegasus/Transformation+Statistics+file
#
    def get_transformation_statistics(self):
        """
        Aggregate invocation statistics per (transformation, success/failure)
        group: counts, weighted runtime min/max/avg/sum, maxrss and avg_cpu
        summaries.

        SELECT transformation,
               count(invocation_id) as count,
               min(remote_duration * multiplier_factor) as min,
               count(CASE WHEN (invoc.exitcode = 0 and invoc.exitcode is NOT NULL) THEN invoc.exitcode END) AS success,
               count(CASE WHEN (invoc.exitcode != 0 and invoc.exitcode is NOT NULL) THEN invoc.exitcode END) AS failure,
               max(remote_duration * multiplier_factor) as max,
               avg(remote_duration * multiplier_factor) as avg,
               sum(remote_duration * multiplier_factor) as sum
        FROM
        invocation as invoc, job_instance as ji WHERE
        invoc.job_instance_id = ji.job_instance_id and
        invoc.wf_id IN (1,2,3) GROUP BY transformation
        """
        q = self.session.query(
            Invocation.transformation,
            # Each transformation yields up to two rows: one for successful
            # invocations (exitcode == 0) and one for failed ones.
            case([(Invocation.exitcode == 0, "successful")], else_="failed").label(
                "type"
            ),
            func.count(Invocation.invocation_id).label("count"),
            # COUNT of a CASE expression counts only non-NULL branches.
            func.count(case([(Invocation.exitcode == 0, Invocation.exitcode)])).label(
                "success"
            ),
            func.count(case([(Invocation.exitcode != 0, Invocation.exitcode)])).label(
                "failure"
            ),
            # runtime (remote duration weighted by the instance multiplier)
            cast(
                func.min(Invocation.remote_duration * JobInstance.multiplier_factor),
                Float,
            ).label("min"),
            cast(
                func.max(Invocation.remote_duration * JobInstance.multiplier_factor),
                Float,
            ).label("max"),
            cast(
                func.avg(Invocation.remote_duration * JobInstance.multiplier_factor),
                Float,
            ).label("avg"),
            cast(
                func.sum(Invocation.remote_duration * JobInstance.multiplier_factor),
                Float,
            ).label("sum"),
            # maxrss (peak resident set size as recorded by kickstart)
            func.min(Invocation.maxrss).label("min_maxrss"),
            func.max(Invocation.maxrss).label("max_maxrss"),
            cast(func.avg(Invocation.maxrss), Float,).label("avg_maxrss"),
            # avg_cpu
            cast(func.min(Invocation.avg_cpu), Float,).label("min_avg_cpu"),
            cast(func.max(Invocation.avg_cpu), Float,).label("max_avg_cpu"),
            cast(func.avg(Invocation.avg_cpu), Float,).label("avg_avg_cpu"),
        )
        q = q.filter(Invocation.job_instance_id == JobInstance.job_instance_id)
        q = q.filter(Invocation.wf_id.in_(self._wfs))
        q = q.group_by(Invocation.transformation).group_by("type")
        q = q.order_by(Invocation.transformation)
        return q.all()
#
# Runtime queries
# https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
#
def _get_date_divisors(self):
vals = {"month": 2629743, "week": 604800, "day": 86400, "hour": 3600}
return vals[self._time_filter_mode]
def _get_host_filter(self):
if self._host_filter is None:
return None
elif isinstance(self._host_filter, str):
return Host.hostname == self._host_filter
elif isinstance(self._host_filter, type([])):
return Host.hostname.in_(self._host_filter)
else:
return None
def _get_xform_filter(self):
if (
self._xform_filter["include"] is not None
and self._xform_filter["exclude"] is not None
):
self.log.error(
"Can't set both transform include and exclude - reset s.set_transformation_filter()"
)
return None
elif (
self._xform_filter["include"] is None
and self._xform_filter["exclude"] is None
):
return None
elif self._xform_filter["include"] is not None:
if isinstance(self._xform_filter["include"], str):
return Invocation.transformation == self._xform_filter["include"]
elif isinstance(self._xform_filter["include"], type([])):
return Invocation.transformation.in_(self._xform_filter["include"])
else:
return None
elif self._xform_filter["exclude"] is not None:
if isinstance(self._xform_filter["exclude"], str):
return Invocation.transformation != self._xform_filter["exclude"]
elif isinstance(self._xform_filter["exclude"], type([])):
return not_(
Invocation.transformation.in_(self._xform_filter["exclude"])
)
else:
return None
else:
return None
def get_invocation_by_time(self):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
"""
q = self.session.query(
(cast(Invocation.start_time / self._get_date_divisors(), Integer)).label(
"date_format"
),
func.count(Invocation.invocation_id).label("count"),
cast(func.sum(Invocation.remote_duration), Float).label("total_runtime"),
)
q = q.filter(Workflow.root_wf_id == self._root_wf_id)
q = q.filter(Invocation.wf_id == Workflow.wf_id)
if self._get_xform_filter() is not None:
q = q.filter(self._get_xform_filter())
q = q.group_by("date_format").order_by("date_format")
return q.all()
def get_jobs_run_by_time(self):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
"""
q = self.session.query(
(cast(Jobstate.timestamp / self._get_date_divisors(), Integer)).label(
"date_format"
),
func.count(JobInstance.job_instance_id).label("count"),
cast(func.sum(JobInstance.local_duration), Float).label("total_runtime"),
)
q = q.filter(Workflow.root_wf_id == self._root_wf_id)
q = q.filter(Workflow.wf_id == Job.wf_id)
q = q.filter(Job.job_id == JobInstance.job_id)
q = q.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
q = q.filter(Jobstate.state == "EXECUTE")
q = q.filter(JobInstance.local_duration != None) # noqa: E711
if self._get_job_filter() is not None:
q = q.filter(self._get_job_filter())
q = q.group_by("date_format").order_by("date_format")
return q.all()
def get_invocation_by_time_per_host(self, host=None):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
"""
q = self.session.query(
(cast(Invocation.start_time / self._get_date_divisors(), Integer)).label(
"date_format"
),
Host.hostname.label("host_name"),
func.count(Invocation.invocation_id).label("count"),
cast(func.sum(Invocation.remote_duration), Float).label("total_runtime"),
)
q = q.filter(Workflow.root_wf_id == self._root_wf_id)
q = q.filter(Invocation.wf_id == Workflow.wf_id)
q = q.filter(JobInstance.job_instance_id == Invocation.job_instance_id)
q = q.filter(JobInstance.host_id == Host.host_id)
if self._get_host_filter() is not None:
q = q.filter(self._get_host_filter())
if self._get_xform_filter() is not None:
q = q.filter(self._get_xform_filter())
q = q.group_by("date_format", "host_name").order_by("date_format")
return q.all()
def get_jobs_run_by_time_per_host(self):
"""
https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
"""
q = self.session.query(
(cast(Jobstate.timestamp / self._get_date_divisors(), Integer)).label(
"date_format"
),
Host.hostname.label("host_name"),
func.count(JobInstance.job_instance_id).label("count"),
cast(func.sum(JobInstance.local_duration), Float).label("total_runtime"),
)
q = q.filter(Workflow.root_wf_id == self._root_wf_id)
q = q.filter(Workflow.wf_id == Job.wf_id)
q = q.filter(Job.job_id == JobInstance.job_id)
q = q.filter(Jobstate.job_instance_id == JobInstance.job_instance_id)
q = q.filter(Jobstate.state == "EXECUTE")
q = q.filter(JobInstance.host_id == Host.host_id)
if self._get_host_filter() is not None:
q = q.filter(self._get_host_filter())
if self._get_job_filter() is not None:
q = q.filter(self._get_job_filter())
q = q.group_by("date_format", "host_name").order_by("date_format")
return q.all()
|
en
| 0.636561
|
Library to generate statistics from the new Stampede 3.1 backend. Usage:: stats = StampedeStatistics(connString='sqlite:///montage.db') stats.initialize('unique_wf_uuid') stats.set_job_filter('dax') print stats.get_total_jobs_status() print stats.get_total_succeeded_jobs_status() stats.set_job_filter('dag') print stats.get_total_jobs_status() print stats.get_total_succeeded_jobs_status() etc. stats.close() Constructor and initialize methods: The constructor takes a required sqlalchemy connection string as the first argument. The stats class will default to returning data in the "expanded workflow" mode. To change this behavior and only analyize a single workflow set the optional arg: expand_workflow = False along with the connection string argument. The initialize method is called with a single argument - the wf_uuid of the desired "root workflow" whether returning data in expanded mode or not. The method will return True or False if a query exception is raised so the programmer can test for success before calling the subsequent query methods. This method is intended to be called once per object. Job filtering: Jobs can be filtered using any of the strings in the jobtype ENUM, with the addition of the values 'all' and 'nonsub' which will return all jobs and non-subworkflow jobs respectively. If the filter is not explicitly set, it will default to the 'all' mode. The desired filter can be set with the set_job_filter() method. After setting this method, all subsequent calls to the query methods will return results according to the filter. This can be set and reset as many times as the user desires. There is an example of re/setting the job filter in the usage section above. The query methods will return different values after the filter is re/set. Time filtering: This behaves much like job filtering. For the runtime queries, the time intervals 'month', 'week', 'day', and 'hour' can be set using the set_time_filter() method. 
If this method is not set, it will default to the 'month' interval for filtering. Hostname filtering: For the runtime queries the method set_host_filter() can be used to filter by various hosts. This method differs from the job and time filtering methods in that the argument can be either a string (for a single hostname), or an array/list of hostnames for multiple hostnames. Example:: s.set_host_filter('butterfly.isi.edu') or s.set_host_filter(['engage-submit3.renci.org', 'node0012.palmetto.clemson.edu']) Either one of these variations will work. The first variation will only retrieve data for that one host, the second will return data for both hosts. If this method is not set, no hostname filtering will be done and information for all hosts will be returned. Transformation filtering: Transformation filtering works similarly to hostname filtering in that it can accept a single string value or a array/list of strings. However the set_transformation_filter() method accepts two keyword arguments - 'include' and 'exclude'. Only one of these keywords can be set per method call. Example:: s.set_transformation_filter(include='pegasus::dirmanager') s.set_transformation_filter(exclude=['dagman::post' , 'dagman::pre' ,'condor::dagman']) etc. This example demonstrates the two proper keyword invocations and that either a string or list may be used. If this method is not set, no filtering will be done and information for all transforms will be returned. Calling this method with no arguments will reset any previously set filters. Return values from methods: The return value types will vary from method to method. Most of the methods will return a single integer or floating point number. Methods which return rows from the DB (rather than just a number) will return a list which can be interacted with in one of two ways - either by array index (list of tuples) or by a named attr (list of objects). 
The two following methods of interacting with the same query results will both produce the same output: Example:: for row in s.get_job_kickstart(): print row[0], row[1], row[2] print row.job_id, row.job_name, row.kickstart Either syntax will work. When using the named attribute method, the attributes are the names of the columns/aliases in the SELECT stanza of the query. If the row returned by the method is printed, it will display as a tuple of results per row. Methods:: get_sub_workflow_ids get_descendant_workflow_ids get_schema_version get_total_jobs_status get_total_succeeded_failed_jobs_status get_total_succeeded_jobs_status get_total_failed_jobs_status get_total_jobs_retries get_total_tasks_status get_total_succeeded_tasks_status get_total_failed_tasks_status get_task_success_report get_task_failure_report get_total_tasks_retries get_workflow_states get_workflow_cum_job_wall_time get_submit_side_job_wall_time get_workflow_details get_workflow_retries get_job_statistics get_job_states get_job_instance_sub_wf_map get_failed_job_instances get_job_instance_info get_job_name get_job_site get_job_kickstart get_job_runtime get_job_seqexec get_condor_q_time get_resource_delay get_post_time get_transformation_statistics get_invocation_by_time get_jobs_run_by_time get_invocation_by_time_per_host get_jobs_run_by_time_per_host Methods listed in order of query list on wiki. https://confluence.pegasus.isi.edu/display/pegasus/Pegasus+Statistics+Python+Version+Modified # Main stats class. select parent_wf_id, wf_id from workflow where root_wf_id = (select root_wf_id from workflow where wf_id=self._root_wf_id); # @tree will hold the entire sub-work-flow dependency structure. # Initialize filters with default value If the root_wf_uuid given to initialize function is not the UUID of the root work-flow, and expand_workflow was set to True, then this recursive function determines all child work-flows. 
@tree A dictionary when key is the parent_wf_id and value is a list of its child wf_id's. @wf_node The node for which to determine descendants. The host argument can either be a string/single hostname or it can be a list/array of hostnames. Either of these args can either be a single string/xform type or it can be a list/array of xform types. Both arguments can not be set at the same time. If they are, the program will log an error and not do any filtering. # # Pulls information about sub workflows # Returns info on child workflows only. # # Status of initially planned wf components. # # # The following block of queries are documented here: # https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary # and # https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file # Creates the following subquery that is used in several queries: and jb_inst.job_submit_seq = ( select max(job_submit_seq) from job_instance where job_id = jb_inst.job_id group by job_id ) https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaljobs https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaljobs https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceeded_failed_jobs https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededfailedjobs SELECT DISTINCT count( job_instance_id) FROM jobstate j JOIN ( SELECT max(job_instance_id) as maxid FROM job_instance GROUP BY job_id) max_ji ON j.job_instance_id=max_ji.maxid WHERE j.state = 'JOB_HELD'; https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceededjobs https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededjobs # noqa: E711 https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalfailedjobs 
https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalfailedjobs # noqa: E711 # noqa: E711 The states arg is a list of strings. Returns an appropriate subquery. https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-TotalJobRetries https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-TotalJobRetries https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaltask https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaltasks https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totalsucceededtasks https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totalsucceededtasks # This query generation method is obsolete and is only being # kept for optimization reference. # noqa: E711 # Calling wrapper methods would invoke like so: # q = self._base_task_status_query() # q = q.filter(Invocation.exitcode == 0) # return q.count() # PM-713 - Change to func.count(distinct(Invocation.abs_task_id)) from func.count(Invocation.exitcode) # noqa: E711 https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Totaltaskretries https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Totaltaskretries # noqa: E711 # # Run statistics # https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Workflowwalltime https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowwalltime select sum(remote_duration * multiplier_factor) FROM invocation as invoc, job_instance as ji WHERE invoc.task_submit_seq >= 0 and invoc.job_instance_id = ji.job_instance_id and invoc.wf_id in (1,2,3) and invoc.transformation <> 'condor::dagman' 
https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Workflowcumulativejobwalltime https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowcumulativejobwalltime :param type: whether integrity type is check | compute :param file_type: file type input or output :return: # at most two records grouped by type compute | check :param name: what type of tag to aggregate on :return: :param type: whether integrity type is check | compute :param file_type: file type input or output :return: for result in q.all(): print result print result.type print result.file_type select sum(local_duration * multiplier_factor) FROM job_instance as jb_inst, job as jb WHERE jb_inst.job_id = jb.job_id and jb.wf_id in (1,2,3) and ((not (jb.type_desc ='dax' or jb.type_desc ='dag')) or ((jb.type_desc ='dax' or jb.type_desc ='dag') and jb_inst.subwf_id is NULL) ) https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Summary#WorkflowSummary-Cumulativejobwalltimeasseenfromsubmitside https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Cumulativejobwalltimeasseenfromsubmitside # noqa: E711 https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowdetails https://confluence.pegasus.isi.edu/display/pegasus/Workflow+Statistics+file#WorkflowStatisticsfile-Workflowretries # # Job Statistics # These queries are documented: # https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file # https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-All # PM-704 the task submit sequence needs to be >= -1 to include prescript status https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-JobStates # select min(timestamp) from jobstate where job_instance_id = jb_inst.job_instance_id # ) as jobS , # ( # select max(timestamp)-min(timestamp) from jobstate where 
job_instance_id = jb_inst.job_instance_id # ) as jobDuration, https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Subworkflowjobinstancesmapping https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Failedjobinstances # PM-752 we use the same query that we used to get the count of failed jobs https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Failedjobinstances used in the pegasus plots code. is deprecated # noqa: E711 # noqa: E711 Job instance information. Pulls all or for one instance. https://confluence.pegasus.isi.edu/pages/viewpage.action?pageId=14876831 SELECT task_submit_seq, exitcode, executable, argv, transformation, abs_task_id FROM invocation WHERE job_instance_id = 7 and wf_id = 1 https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Name https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Site https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Kickstart https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Runtime https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Seqexec https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-CondorQTime https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Resource https://confluence.pegasus.isi.edu/display/pegasus/Job+Statistics+file#JobStatisticsfile-Post # # This query documented: # https://confluence.pegasus.isi.edu/display/pegasus/Transformation+Statistics+file # SELECT transformation, count(invocation_id) as count, min(remote_duration * multiplier_factor) as min, count(CASE WHEN (invoc.exitcode = 0 and invoc.exitcode is NOT NULL) THEN invoc.exitcode END) AS success, count(CASE WHEN (invoc.exitcode != 0 and invoc.exitcode is NOT NULL) THEN invoc.exitcode 
END) AS failure, max(remote_duration * multiplier_factor) as max, avg(remote_duration * multiplier_factor) as avg, sum(remote_duration * multiplier_factor) as sum FROM invocation as invoc, job_instance as ji WHERE invoc.job_instance_id = ji.job_instance_id and invoc.wf_id IN (1,2,3) GROUP BY transformation # runtime # maxrss # avg_cpu # # Runtime queries # https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries # https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries # noqa: E711 https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries https://confluence.pegasus.isi.edu/display/pegasus/Additional+queries
| 2.425778
| 2
|
jet_bridge/__init__.py
|
BradyBromley/jet-bridge
| 1
|
6625886
|
VERSION = '0.4.8'
|
VERSION = '0.4.8'
|
none
| 1
| 1.112639
| 1
|
|
assemblyline/assemblyline/al/service/list_queue_sizes.py
|
dendisuhubdy/grokmachine
| 46
|
6625887
|
#!/usr/bin/env python
import logging
from assemblyline.al.common import forge
ds = None
log = logging.getLogger('assemblyline.al.service')
def get_service_queue_length(service_name):
    """Return the queue length for a single service.

    :param service_name: dotted classpath of the service; only the final
        component is used as the queue name.
    :return: the queue length, or -1 if the queue could not be queried.
    """
    try:
        svc_queue = forge.get_service_queue(service_name.split(".")[-1])
        return svc_queue.length()
    except Exception:  # pylint: disable=W0703
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; catching Exception keeps the best-effort -1 fallback
        # while letting those propagate.
        return -1
def get_service_queue_lengths():
    """Return a dict mapping each service name to its current queue length."""
    global ds  # pylint: disable=W0603
    if not ds:
        ds = forge.get_datastore()

    queue_lengths = {}
    for svc in ds.list_services():
        # noinspection PyBroadException
        try:
            if not svc:
                continue
            default_path = "al_services.%s.%s" % (svc['repo'], svc['class_name'])
            classpath = svc.get('classpath', default_path)
            queue_lengths[svc['name']] = get_service_queue_length(classpath)
        except Exception:  # pylint: disable=W0703
            log.exception('while getting queue length for %s', svc['name'])

    return queue_lengths
if __name__ == '__main__':
    # Pretty-print the service -> queue-length mapping when run as a script.
    from pprint import pprint
    pprint(get_service_queue_lengths())
|
#!/usr/bin/env python
import logging
from assemblyline.al.common import forge
ds = None
log = logging.getLogger('assemblyline.al.service')
def get_service_queue_length(service_name):
# noinspection PyBroadException
try:
svc_queue = forge.get_service_queue(service_name.split(".")[-1])
return svc_queue.length()
except:
return -1
def get_service_queue_lengths():
global ds # pylint: disable=W0603
if not ds:
ds = forge.get_datastore()
# Default is to return all services in a dict of class_name: queue_size.
queue_lengths = {}
services = ds.list_services()
for svc in services:
# noinspection PyBroadException
try:
if not svc:
continue
classpath = svc.get('classpath', "al_services.%s.%s" % (svc['repo'], svc['class_name']))
queue_lengths[svc['name']] = get_service_queue_length(classpath)
except Exception: # pylint: disable=W0703
log.exception('while getting queue length for %s', svc['name'])
return queue_lengths
if __name__ == '__main__':
import pprint
pprint.pprint(get_service_queue_lengths())
|
en
| 0.436893
|
#!/usr/bin/env python # noinspection PyBroadException # pylint: disable=W0603 # Default is to return all services in a dict of class_name: queue_size. # noinspection PyBroadException # pylint: disable=W0703
| 2.12044
| 2
|
datacube/testutils/io.py
|
agdc-research-trial/gdf
| 1
|
6625888
|
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import toolz
from ..model import Dataset
from ..storage import reproject_and_fuse, BandInfo
from ..storage._rio import RasterioDataSource, RasterDatasetDataSource
from ..utils.geometry._warp import resampling_s2rio
from ..storage._read import rdr_geobox
from ..utils.geometry import GeoBox
from ..utils.geometry import gbox as gbx
from ..index.eo3 import is_doc_eo3, _norm_grid # type: ignore[attr-defined]
from types import SimpleNamespace
class RasterFileDataSource(RasterioDataSource):
    """ This is only used in test code

    A rasterio-backed data source for a plain file, with optional fallback
    band number, nodata, CRS and transform supplied at construction time.
    """

    def __init__(self, filename, bandnumber, nodata=None, crs=None, transform=None, lock=None):
        super().__init__(filename, nodata, lock=lock)
        self.bandnumber = bandnumber
        self.crs = crs
        self.transform = transform

    def get_bandnumber(self, src):
        # Band index is fixed at construction; `src` is ignored.
        return self.bandnumber

    def get_transform(self, shape):
        if self.transform is not None:
            return self.transform
        raise RuntimeError('No transform in the data and no fallback')

    def get_crs(self):
        if self.crs is not None:
            return self.crs
        raise RuntimeError('No CRS in the data and no fallback')
def _raster_metadata(band):
    """Open *band* with the default IO driver and summarise its
    dtype name, nodata value and GeoBox."""
    with RasterDatasetDataSource(band).open() as rdr:
        return SimpleNamespace(
            dtype=rdr.dtype.name,
            nodata=rdr.nodata,
            geobox=rdr_geobox(rdr),
        )
def get_raster_info(ds: Dataset, measurements=None):
    """Map band name -> raster metadata (dtype, nodata, geobox).

    :param ds: Dataset
    :param measurements: List of band names to load (all bands when None)
    """
    bands = list(ds.type.measurements) if measurements is None else measurements
    return {name: _raster_metadata(BandInfo(ds, name)) for name in bands}
def eo3_geobox(ds: Dataset, band: str) -> GeoBox:
    """Construct the native GeoBox of *band* from an EO3 dataset document.

    :raises ValueError: if the band is unknown or the document lacks a CRS
        or grid definition.
    """
    mm = ds.measurements.get(ds.type.canonical_measurement(band))
    if mm is None:
        raise ValueError(f"No such band: {band}")

    crs = ds.crs
    grid = toolz.get_in(('grids', mm.get('grid', 'default')), ds.metadata_doc)
    if crs is None or grid is None:
        raise ValueError('Not a valid EO3 dataset')

    grid = _norm_grid(grid)
    h, w = grid.shape
    return GeoBox(w, h, grid.transform, crs)
def native_geobox(ds, measurements=None, basis=None):
    """Compute native GeoBox for a set of bands for a given dataset

    :param ds: Dataset
    :param measurements: List of band names to consider
    :param basis: Name of the band to use for computing reference frame, other
                  bands might be reprojected if they use different pixel grid

    :return: GeoBox describing native storage coordinates.
    :raises ValueError: for a broken GridSpec or inconsistent band grids.
    """
    gs = ds.type.grid_spec
    if gs is not None:
        # Ingested product: the native grid is the tile this dataset covers.
        tiles = [gbox for _, gbox in gs.tiles(ds.bounds)]
        if len(tiles) != 1:
            # Dataset overlaps several tiles, or none -- no good.
            raise ValueError('Broken GridSpec detected')
        return tiles[0]

    if measurements is None and basis is None:
        measurements = list(ds.type.measurements)

    if is_doc_eo3(ds.metadata_doc):
        if basis is not None:
            return eo3_geobox(ds, basis)
        gboxes = [eo3_geobox(ds, band) for band in measurements]
    else:
        if basis is not None:
            return get_raster_info(ds, [basis])[basis].geobox
        infos = get_raster_info(ds, measurements)
        gboxes = [info.geobox for info in infos.values()]

    reference = gboxes[0]
    if any(gbox != reference for gbox in gboxes):
        raise ValueError('Not all bands share the same pixel grid')
    return reference
def native_load(ds, measurements=None, basis=None, **kw):
"""Load single dataset in native resolution.
:param ds: Dataset
:param measurements: List of band names to load
:param basis: Name of the band to use for computing reference frame, other
bands might be reprojected if they use different pixel grid
:param **kw: Any other parameter load_data accepts
:return: Xarray dataset
"""
from datacube import Datacube
geobox = native_geobox(ds, measurements, basis) # early exit via exception if no compatible grid exists
if measurements is not None:
mm = ds.type.lookup_measurements(measurements)
else:
mm = ds.type.measurements
return Datacube.load_data(Datacube.group_datasets([ds], 'time'),
geobox,
measurements=mm, **kw)
def dc_read(path,
band=1,
gbox=None,
resampling='nearest',
dtype=None,
dst_nodata=None,
fallback_nodata=None):
"""
Use default io driver to read file without constructing Dataset object.
"""
source = RasterFileDataSource(path, band, nodata=fallback_nodata)
with source.open() as rdr:
dtype = rdr.dtype if dtype is None else dtype
if gbox is None:
gbox = rdr_geobox(rdr)
if dst_nodata is None:
dst_nodata = rdr.nodata
# currently dst_nodata = None case is not supported. So if fallback_nodata
# was None and file had none set, then use 0 as default output fill value
if dst_nodata is None:
dst_nodata = 0
im = np.full(gbox.shape, dst_nodata, dtype=dtype)
reproject_and_fuse([source], im, gbox, dst_nodata, resampling=resampling)
return im
def write_gtiff(fname,
pix,
crs='epsg:3857',
resolution=(10, -10),
offset=(0.0, 0.0),
nodata=None,
overwrite=False,
blocksize=None,
gbox=None,
**extra_rio_opts):
""" Write ndarray to GeoTiff file.
Geospatial info can be supplied either via
- resolution, offset, crs
or
- gbox (takes precedence if supplied)
"""
# pylint: disable=too-many-locals
from affine import Affine
import rasterio
from pathlib import Path
if pix.ndim == 2:
h, w = pix.shape
nbands = 1
band = 1
elif pix.ndim == 3:
nbands, h, w = pix.shape
band = tuple(i for i in range(1, nbands+1))
else:
raise ValueError('Need 2d or 3d ndarray on input')
if not isinstance(fname, Path):
fname = Path(fname)
if fname.exists():
if overwrite:
fname.unlink()
else:
raise IOError("File exists")
if gbox is not None:
assert gbox.shape == (h, w)
A = gbox.transform
crs = str(gbox.crs)
else:
sx, sy = resolution
tx, ty = offset
A = Affine(sx, 0, tx,
0, sy, ty)
rio_opts = dict(width=w,
height=h,
count=nbands,
dtype=pix.dtype.name,
crs=crs,
transform=A,
predictor=2,
compress='DEFLATE')
if blocksize is not None:
rio_opts.update(tiled=True,
blockxsize=min(blocksize, w),
blockysize=min(blocksize, h))
if nodata is not None:
rio_opts.update(nodata=nodata)
rio_opts.update(extra_rio_opts)
with rasterio.open(str(fname), 'w', driver='GTiff', **rio_opts) as dst:
dst.write(pix, band)
meta = dst.meta
meta['gbox'] = gbox if gbox is not None else rio_geobox(meta)
meta['path'] = fname
return SimpleNamespace(**meta)
def dc_crs_from_rio(crs):
from datacube.utils.geometry import CRS
if crs.is_epsg_code:
return CRS('EPSG:{}'.format(crs.to_epsg()))
return CRS(crs.wkt)
def rio_geobox(meta):
""" Construct geobox from src.meta of opened rasterio dataset
"""
if 'crs' not in meta or 'transform' not in meta:
return None
h, w = (meta['height'], meta['width'])
crs = dc_crs_from_rio(meta['crs'])
transform = meta['transform']
return GeoBox(w, h, transform, crs)
def _fix_resampling(kw):
r = kw.get('resampling', None)
if isinstance(r, str):
kw['resampling'] = resampling_s2rio(r)
def rio_slurp_reproject(fname, gbox, dtype=None, dst_nodata=None, **kw):
"""
Read image with reprojection
"""
import rasterio
from rasterio.warp import reproject
_fix_resampling(kw)
with rasterio.open(str(fname), 'r') as src:
if src.count == 1:
shape = gbox.shape
src_band = rasterio.band(src, 1)
else:
shape = (src.count, *gbox.shape)
src_band = rasterio.band(src, tuple(range(1, src.count+1)))
if dtype is None:
dtype = src.dtypes[0]
if dst_nodata is None:
dst_nodata = src.nodata
if dst_nodata is None:
dst_nodata = 0
pix = np.full(shape, dst_nodata, dtype=dtype)
reproject(src_band, pix,
dst_nodata=dst_nodata,
dst_transform=gbox.transform,
dst_crs=str(gbox.crs),
**kw)
meta = src.meta
meta['src_gbox'] = rio_geobox(meta)
meta['path'] = fname
meta['gbox'] = gbox
return pix, SimpleNamespace(**meta)
def rio_slurp_read(fname, out_shape=None, **kw):
"""
Read whole image file using rasterio.
:returns: ndarray (2d or 3d if multi-band), dict (rasterio meta)
"""
import rasterio
_fix_resampling(kw)
if out_shape is not None:
kw.update(out_shape=out_shape)
with rasterio.open(str(fname), 'r') as src:
data = src.read(1, **kw) if src.count == 1 else src.read(**kw)
meta = src.meta
src_gbox = rio_geobox(meta)
same_gbox = out_shape is None or out_shape == src_gbox.shape
gbox = src_gbox if same_gbox else gbx.zoom_to(src_gbox, out_shape)
meta['src_gbox'] = src_gbox
meta['gbox'] = gbox
meta['path'] = fname
return data, SimpleNamespace(**meta)
def rio_slurp(fname, *args, **kw):
"""
Dispatches to either:
rio_slurp_read(fname, out_shape, ..)
rio_slurp_reproject(fname, gbox, ...)
"""
if len(args) == 0:
if 'gbox' in kw:
return rio_slurp_reproject(fname, **kw)
else:
return rio_slurp_read(fname, **kw)
if isinstance(args[0], GeoBox):
return rio_slurp_reproject(fname, *args, **kw)
else:
return rio_slurp_read(fname, *args, **kw)
def rio_slurp_xarray(fname, *args, rgb='auto', **kw):
"""
Dispatches to either:
rio_slurp_read(fname, out_shape, ..)
rio_slurp_reproject(fname, gbox, ...)
then wraps it all in xarray.DataArray with .crs,.nodata etc.
"""
from xarray import DataArray
if len(args) == 0:
if 'gbox' in kw:
im, mm = rio_slurp_reproject(fname, **kw)
else:
im, mm = rio_slurp_read(fname, **kw)
else:
if isinstance(args[0], GeoBox):
im, mm = rio_slurp_reproject(fname, *args, **kw)
else:
im, mm = rio_slurp_read(fname, *args, **kw)
if im.ndim == 3:
dims = ('band', *mm.gbox.dims)
if rgb and im.shape[0] in (3, 4):
im = im.transpose([1, 2, 0])
dims = tuple(dims[i] for i in [1, 2, 0])
else:
dims = mm.gbox.dims
return DataArray(im,
dims=dims,
coords=mm.gbox.xr_coords(with_crs=True),
attrs=dict(
nodata=mm.nodata))
|
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import toolz
from ..model import Dataset
from ..storage import reproject_and_fuse, BandInfo
from ..storage._rio import RasterioDataSource, RasterDatasetDataSource
from ..utils.geometry._warp import resampling_s2rio
from ..storage._read import rdr_geobox
from ..utils.geometry import GeoBox
from ..utils.geometry import gbox as gbx
from ..index.eo3 import is_doc_eo3, _norm_grid # type: ignore[attr-defined]
from types import SimpleNamespace
class RasterFileDataSource(RasterioDataSource):
    """ This is only used in test code

    A RasterioDataSource addressed directly by file path and band number,
    with optional fallback CRS/transform for files that carry none.
    """
    def __init__(self, filename, bandnumber, nodata=None, crs=None, transform=None, lock=None):
        super(RasterFileDataSource, self).__init__(filename, nodata, lock=lock)
        self.bandnumber = bandnumber  # 1-based band index to read
        self.crs = crs                # fallback CRS (used when the file has none)
        self.transform = transform    # fallback affine transform (used when the file has none)
    def get_bandnumber(self, src):
        # Band selection is fixed at construction time; `src` is ignored.
        return self.bandnumber
    def get_transform(self, shape):
        # NOTE(review): presumably called by the base class only when the file
        # itself has no transform -- confirm against RasterioDataSource.
        if self.transform is None:
            raise RuntimeError('No transform in the data and no fallback')
        return self.transform
    def get_crs(self):
        # Same fallback pattern as get_transform, for the coordinate system.
        if self.crs is None:
            raise RuntimeError('No CRS in the data and no fallback')
        return self.crs
def _raster_metadata(band):
    """Open a single band and capture its dtype name, nodata and GeoBox.

    :param band: BandInfo describing the band to open
    :return: SimpleNamespace(dtype=..., nodata=..., geobox=...)
    """
    source = RasterDatasetDataSource(band)
    with source.open() as rdr:
        return SimpleNamespace(dtype=rdr.dtype.name,
                               nodata=rdr.nodata,
                               geobox=rdr_geobox(rdr))
def get_raster_info(ds: Dataset, measurements=None):
    """Return per-band raster metadata (dtype, nodata, geobox) for *ds*.

    :param ds: Dataset
    :param measurements: List of band names to load (default: all bands)
    :return: Mapping of band name to its metadata namespace
    """
    bands = list(ds.type.measurements) if measurements is None else measurements
    info = {}
    for name in bands:
        info[name] = _raster_metadata(BandInfo(ds, name))
    return info
def eo3_geobox(ds: Dataset, band: str) -> GeoBox:
    """Compute the GeoBox of one band of an EO3 dataset.

    The grid is read from the dataset's metadata document, so no file
    access is needed.

    :param ds: Dataset whose metadata document follows the EO3 format
    :param band: Band name; resolved to its canonical measurement name
    :raises ValueError: if the band is unknown, or the dataset lacks a CRS
        or a grid definition (i.e. is not a valid EO3 document)
    """
    mm = ds.measurements.get(ds.type.canonical_measurement(band),
                             None)
    if mm is None:
        raise ValueError(f"No such band: {band}")
    crs = ds.crs
    # Band-specific grid if declared, otherwise the dataset's 'default' grid.
    doc_path = ('grids', mm.get('grid', 'default'))
    grid = toolz.get_in(doc_path, ds.metadata_doc)
    if crs is None or grid is None:
        raise ValueError('Not a valid EO3 dataset')
    grid = _norm_grid(grid)
    h, w = grid.shape
    return GeoBox(w, h, grid.transform, crs)
def native_geobox(ds, measurements=None, basis=None):
    """Compute native GeoBox for a set of bands for a given dataset
    :param ds: Dataset
    :param measurements: List of band names to consider
    :param basis: Name of the band to use for computing reference frame, other
    bands might be reprojected if they use different pixel grid
    :return: GeoBox describing native storage coordinates.
    :raises ValueError: if the bands do not share one pixel grid, or an
        ingested dataset does not map to exactly one GridSpec tile.
    """
    gs = ds.type.grid_spec
    if gs is not None:
        # Dataset is from ingested product, figure out GeoBox of the tile this dataset covers
        bb = [gbox for _, gbox in gs.tiles(ds.bounds)]
        if len(bb) != 1:
            # Ingested product but dataset overlaps several/none tiles -- no good
            raise ValueError('Broken GridSpec detected')
        return bb[0]
    if measurements is None and basis is None:
        measurements = list(ds.type.measurements)
    if is_doc_eo3(ds.metadata_doc):
        # EO3 metadata records each band's grid in the document itself.
        if basis is not None:
            return eo3_geobox(ds, basis)
        gboxes = [eo3_geobox(ds, band) for band in measurements]
    else:
        # Legacy metadata: each band's file must be opened to discover its grid.
        if basis is not None:
            return get_raster_info(ds, [basis])[basis].geobox
        ii = get_raster_info(ds, measurements)
        gboxes = [info.geobox for info in ii.values()]
    geobox = gboxes[0]
    # All requested bands must agree on one grid, since a single GeoBox is returned.
    consistent = all(geobox == gbox for gbox in gboxes)
    if not consistent:
        raise ValueError('Not all bands share the same pixel grid')
    return geobox
def native_load(ds, measurements=None, basis=None, **kw):
    """Load single dataset in native resolution.
    :param ds: Dataset
    :param measurements: List of band names to load
    :param basis: Name of the band to use for computing reference frame, other
    bands might be reprojected if they use different pixel grid
    :param **kw: Any other parameter load_data accepts
    :return: Xarray dataset
    """
    # Local import -- presumably avoids a circular import at module load time;
    # confirm before moving to the top of the file.
    from datacube import Datacube
    geobox = native_geobox(ds, measurements, basis)  # early exit via exception if no compatible grid exists
    if measurements is not None:
        mm = ds.type.lookup_measurements(measurements)
    else:
        mm = ds.type.measurements
    return Datacube.load_data(Datacube.group_datasets([ds], 'time'),
                              geobox,
                              measurements=mm, **kw)
def dc_read(path,
            band=1,
            gbox=None,
            resampling='nearest',
            dtype=None,
            dst_nodata=None,
            fallback_nodata=None):
    """
    Use default io driver to read file without constructing Dataset object.

    :param path: Path of the raster file to read
    :param band: 1-based band index to read
    :param gbox: Output GeoBox; defaults to the file's own grid
    :param resampling: Resampling method name
    :param dtype: Output dtype; defaults to the file's dtype
    :param dst_nodata: Fill value for the output array
    :param fallback_nodata: Nodata to assume when the file declares none
    :return: ndarray of shape ``gbox.shape``
    """
    source = RasterFileDataSource(path, band, nodata=fallback_nodata)
    # Open once up-front just to pick up defaults (dtype/grid/nodata).
    with source.open() as rdr:
        dtype = rdr.dtype if dtype is None else dtype
        if gbox is None:
            gbox = rdr_geobox(rdr)
        if dst_nodata is None:
            dst_nodata = rdr.nodata
    # currently dst_nodata = None case is not supported. So if fallback_nodata
    # was None and file had none set, then use 0 as default output fill value
    if dst_nodata is None:
        dst_nodata = 0
    im = np.full(gbox.shape, dst_nodata, dtype=dtype)
    reproject_and_fuse([source], im, gbox, dst_nodata, resampling=resampling)
    return im
def write_gtiff(fname,
                pix,
                crs='epsg:3857',
                resolution=(10, -10),
                offset=(0.0, 0.0),
                nodata=None,
                overwrite=False,
                blocksize=None,
                gbox=None,
                **extra_rio_opts):
    """ Write ndarray to GeoTiff file.
    Geospatial info can be supplied either via
    - resolution, offset, crs
    or
    - gbox (takes precedence if supplied)

    :param fname: Output path (str or pathlib.Path)
    :param pix: 2d (y, x) or 3d (band, y, x) ndarray of pixels
    :param crs: CRS string; ignored when gbox is given
    :param resolution: (sx, sy) pixel size; ignored when gbox is given
    :param offset: (tx, ty) top-left corner; ignored when gbox is given
    :param nodata: nodata value to record in the file, if any
    :param overwrite: delete an existing file first instead of raising
    :param blocksize: when set, write a tiled TIFF with this block size
    :param gbox: GeoBox supplying shape/transform/CRS for the output
    :param extra_rio_opts: extra creation options passed to rasterio.open
    :return: SimpleNamespace of the written file's metadata plus gbox/path
    :raises ValueError: if pix is not a 2d or 3d array
    :raises IOError: if the file exists and overwrite is False
    """
    # pylint: disable=too-many-locals
    from affine import Affine
    import rasterio
    from pathlib import Path
    if pix.ndim == 2:
        h, w = pix.shape
        nbands = 1
        band = 1
    elif pix.ndim == 3:
        nbands, h, w = pix.shape
        band = tuple(i for i in range(1, nbands+1))
    else:
        raise ValueError('Need 2d or 3d ndarray on input')
    if not isinstance(fname, Path):
        fname = Path(fname)
    if fname.exists():
        if overwrite:
            fname.unlink()
        else:
            raise IOError("File exists")
    if gbox is not None:
        # gbox wins over resolution/offset/crs and must match the pixel shape.
        assert gbox.shape == (h, w)
        A = gbox.transform
        crs = str(gbox.crs)
    else:
        sx, sy = resolution
        tx, ty = offset
        A = Affine(sx, 0, tx,
                   0, sy, ty)
    rio_opts = dict(width=w,
                    height=h,
                    count=nbands,
                    dtype=pix.dtype.name,
                    crs=crs,
                    transform=A,
                    predictor=2,
                    compress='DEFLATE')
    if blocksize is not None:
        rio_opts.update(tiled=True,
                        blockxsize=min(blocksize, w),
                        blockysize=min(blocksize, h))
    if nodata is not None:
        rio_opts.update(nodata=nodata)
    # Caller-supplied creation options override the defaults above.
    rio_opts.update(extra_rio_opts)
    with rasterio.open(str(fname), 'w', driver='GTiff', **rio_opts) as dst:
        dst.write(pix, band)
        meta = dst.meta
    meta['gbox'] = gbox if gbox is not None else rio_geobox(meta)
    meta['path'] = fname
    return SimpleNamespace(**meta)
def dc_crs_from_rio(crs):
    """Translate a rasterio CRS object into a datacube CRS.

    Prefers the compact EPSG form when the CRS has an EPSG code,
    otherwise falls back to the full WKT representation.
    """
    from datacube.utils.geometry import CRS
    if not crs.is_epsg_code:
        return CRS(crs.wkt)
    return CRS('EPSG:{}'.format(crs.to_epsg()))
def rio_geobox(meta):
    """Construct a GeoBox from the ``.meta`` mapping of an opened rasterio dataset.

    Returns ``None`` when the metadata carries no CRS or transform
    (e.g. a plain, non-georeferenced image).
    """
    if not ('crs' in meta and 'transform' in meta):
        return None
    height = meta['height']
    width = meta['width']
    return GeoBox(width, height, meta['transform'], dc_crs_from_rio(meta['crs']))
def _fix_resampling(kw):
r = kw.get('resampling', None)
if isinstance(r, str):
kw['resampling'] = resampling_s2rio(r)
def rio_slurp_reproject(fname, gbox, dtype=None, dst_nodata=None, **kw):
    """
    Read image with reprojection

    :param fname: Path of the raster file
    :param gbox: GeoBox describing the output grid
    :param dtype: Output dtype; defaults to the dtype of the first band
    :param dst_nodata: Output fill value; file nodata, then 0, when not given
    :return: (pixels, metadata) where metadata is a SimpleNamespace carrying
        the rasterio meta plus ``src_gbox``, ``gbox`` and ``path``
    """
    import rasterio
    from rasterio.warp import reproject
    _fix_resampling(kw)
    with rasterio.open(str(fname), 'r') as src:
        if src.count == 1:
            shape = gbox.shape
            src_band = rasterio.band(src, 1)
        else:
            # Multi-band file: read every band into a (band, y, x) stack.
            shape = (src.count, *gbox.shape)
            src_band = rasterio.band(src, tuple(range(1, src.count+1)))
        if dtype is None:
            dtype = src.dtypes[0]
        if dst_nodata is None:
            dst_nodata = src.nodata
        if dst_nodata is None:
            dst_nodata = 0
        # Pre-fill with nodata so pixels outside the source stay at the fill value.
        pix = np.full(shape, dst_nodata, dtype=dtype)
        reproject(src_band, pix,
                  dst_nodata=dst_nodata,
                  dst_transform=gbox.transform,
                  dst_crs=str(gbox.crs),
                  **kw)
        meta = src.meta
        meta['src_gbox'] = rio_geobox(meta)
        meta['path'] = fname
        meta['gbox'] = gbox
    return pix, SimpleNamespace(**meta)
def rio_slurp_read(fname, out_shape=None, **kw):
    """
    Read whole image file using rasterio.

    :param fname: Path of the raster file
    :param out_shape: Optional (height, width) to read at a different resolution
    :returns: ndarray (2d or 3d if multi-band), dict (rasterio meta)
    """
    import rasterio
    _fix_resampling(kw)
    if out_shape is not None:
        kw.update(out_shape=out_shape)
    with rasterio.open(str(fname), 'r') as src:
        data = src.read(1, **kw) if src.count == 1 else src.read(**kw)
        meta = src.meta
        src_gbox = rio_geobox(meta)
        # NOTE(review): rio_geobox returns None for non-georeferenced files;
        # src_gbox.shape below would then raise when out_shape is given --
        # confirm inputs are always georeferenced.
        same_gbox = out_shape is None or out_shape == src_gbox.shape
        gbox = src_gbox if same_gbox else gbx.zoom_to(src_gbox, out_shape)
        meta['src_gbox'] = src_gbox
        meta['gbox'] = gbox
        meta['path'] = fname
    return data, SimpleNamespace(**meta)
def rio_slurp(fname, *args, **kw):
    """
    Dispatches to either:
    rio_slurp_read(fname, out_shape, ..)
    rio_slurp_reproject(fname, gbox, ...)

    A GeoBox (first positional argument or ``gbox=`` keyword) selects
    the reprojecting reader; anything else selects the plain reader.
    """
    wants_reproject = ('gbox' in kw) if not args else isinstance(args[0], GeoBox)
    if wants_reproject:
        return rio_slurp_reproject(fname, *args, **kw)
    return rio_slurp_read(fname, *args, **kw)
def rio_slurp_xarray(fname, *args, rgb='auto', **kw):
    """
    Dispatches to either:
    rio_slurp_read(fname, out_shape, ..)
    rio_slurp_reproject(fname, gbox, ...)
    then wraps it all in xarray.DataArray with .crs,.nodata etc.

    :param rgb: When truthy and the image has 3 or 4 bands, move the band
        axis last so the array displays as an RGB(A) image.
    """
    from xarray import DataArray
    if len(args) == 0:
        if 'gbox' in kw:
            im, mm = rio_slurp_reproject(fname, **kw)
        else:
            im, mm = rio_slurp_read(fname, **kw)
    else:
        # A positional GeoBox selects the reprojecting reader.
        if isinstance(args[0], GeoBox):
            im, mm = rio_slurp_reproject(fname, *args, **kw)
        else:
            im, mm = rio_slurp_read(fname, *args, **kw)
    if im.ndim == 3:
        dims = ('band', *mm.gbox.dims)
        if rgb and im.shape[0] in (3, 4):
            # Band-first -> band-last (y, x, band) for image display.
            im = im.transpose([1, 2, 0])
            dims = tuple(dims[i] for i in [1, 2, 0])
    else:
        dims = mm.gbox.dims
    return DataArray(im,
                     dims=dims,
                     coords=mm.gbox.xr_coords(with_crs=True),
                     attrs=dict(
                         nodata=mm.nodata))
|
en
| 0.72892
|
# This file is part of the Open Data Cube, see https://opendatacube.org for more information # # Copyright (c) 2015-2020 ODC Contributors # SPDX-License-Identifier: Apache-2.0 # type: ignore[attr-defined] This is only used in test code :param ds: Dataset :param measurements: List of band names to load Compute native GeoBox for a set of bands for a given dataset :param ds: Dataset :param measurements: List of band names to consider :param basis: Name of the band to use for computing reference frame, other bands might be reprojected if they use different pixel grid :return: GeoBox describing native storage coordinates. # Dataset is from ingested product, figure out GeoBox of the tile this dataset covers # Ingested product but dataset overlaps several/none tiles -- no good Load single dataset in native resolution. :param ds: Dataset :param measurements: List of band names to load :param basis: Name of the band to use for computing reference frame, other bands might be reprojected if they use different pixel grid :param **kw: Any other parameter load_data accepts :return: Xarray dataset # early exit via exception if no compatible grid exists Use default io driver to read file without constructing Dataset object. # currently dst_nodata = None case is not supported. So if fallback_nodata # was None and file had none set, then use 0 as default output fill value Write ndarray to GeoTiff file. Geospatial info can be supplied either via - resolution, offset, crs or - gbox (takes precedence if supplied) # pylint: disable=too-many-locals Construct geobox from src.meta of opened rasterio dataset Read image with reprojection Read whole image file using rasterio. :returns: ndarray (2d or 3d if multi-band), dict (rasterio meta) Dispatches to either: rio_slurp_read(fname, out_shape, ..) rio_slurp_reproject(fname, gbox, ...) Dispatches to either: rio_slurp_read(fname, out_shape, ..) rio_slurp_reproject(fname, gbox, ...) then wraps it all in xarray.DataArray with .crs,.nodata etc.
| 2.068309
| 2
|
github_linter/tests/pylintrc.py
|
yaleman/github_linter
| 0
|
6625889
|
<filename>github_linter/tests/pylintrc.py<gh_stars>0
""" checks for dependabot config """
from configparser import ConfigParser, NoOptionError # , NoSectionError
from typing import List, Optional, TypedDict
# from jinja2 import Environment, PackageLoader, select_autoescape
# import jinja2.exceptions
import json5 as json
from loguru import logger
from github_linter.repolinter import RepoLinter
CATEGORY = "pylintrc"
LANGUAGES = ["python"]
# TODO: look in "/<repo.name>/.pylintrc"
class PylintRC(TypedDict):
""" defines the settings for a pylintrc file """
disable: List[str]
max_line_length: Optional[int]
class DefaultConfig(TypedDict):
""" config typing for module config """
# https://pylint.pycqa.org/en/latest/user_guide/run.html
pylintrc_locations: List[str]
pylintrc: Optional[PylintRC]
DEFAULT_CONFIG: DefaultConfig = {
# documentation for list of locations
# https://pylint.pycqa.org/en/latest/user_guide/run.html?highlight=pylintrc#command-line-options
"pylintrc_locations": [
".pylintrc",
"pylintrc",
# "pyproject.toml" # providing it has at least one tool.pylint. section
# "setup.cfg" # needs pylint.*
],
"pylintrc": None,
}
def load_pylintrc(repo: RepoLinter, clear_cache: bool = False) -> Optional[ConfigParser]:
""" grabs the .pylintrc file from the repository """
for filepath in repo.config[CATEGORY]["pylintrc_locations"]:
contents = repo.cached_get_file(filepath, clear_cache)
if not contents:
continue
config = ConfigParser()
if not contents.content:
return None
config.read_string(contents.decoded_content.decode("utf-8"))
logger.debug("Successfully loaded {}", filepath)
return config
return None
def check_max_line_length_configured(repo: RepoLinter) -> None:
""" checks for the max-line-length setting in .pylintrc """
# default setting
if "pylintrc" in repo.config:
if "max_line_length" not in repo.config[CATEGORY]:
logger.debug("max_line_length not set in config, no need to run.")
return
config: Optional[ConfigParser] = load_pylintrc(repo)
if not config:
repo.warning(CATEGORY, ".pylintrc not found")
return
if "MASTER" not in config.sections():
logger.debug("Can't find MASTER entry, dumping config")
logger.debug(json.dumps(config, indent=4, default=str, ensure_ascii=False))
return
try:
linelength = config.get("MASTER", "max-line-length")
except NoOptionError:
repo.warning(CATEGORY, "max-line-length not configured")
return
expected = repo.config[CATEGORY]["max_line_length"]
if int(linelength) != int(expected):
repo.error(
CATEGORY,
f"max-line-length wrong, is {linelength}, should be {expected}",
)
return
def check_pylintrc(
repo: RepoLinter,
) -> None:
""" checks for .pylintrc config """
pylintrc = repo.cached_get_file(".pylintrc")
if not pylintrc:
repo.warning(CATEGORY, ".pylintrc not found")
def fix_pylintrc_missing(
repo: RepoLinter,
) -> None:
""" if there's no .pylintrc at all, add one """
logger.error("SKIPPING PYLINTRC UNTIL IT IS MOVED TO PYPROJECT - ref #73 - running in {}", repo.repository.full_name)
# if not repo.config[CATEGORY]["pylintrc_locations"]:
# logger.debug(
# "pylintrc_locations has been set to an empty list, bailing on this fix."
# )
# return
# if not repo.config[CATEGORY]["pylintrc"]:
# logger.debug("pylintrc not configured, bailing on this fix.")
# return
# # check if the pylintrc file exists in any of the check places
# for filepath in repo.config[CATEGORY]["pylintrc_locations"]:
# filecontents = repo.cached_get_file(filepath, clear_cache=True)
# if filecontents:
# logger.debug("File exists in {}, no action required.", filepath)
# return
# filepath = repo.config[CATEGORY]["pylintrc_locations"][0]
# logger.debug("Writing pylintrc file at: {}", filepath)
# # start up jinja2
# jinja2_env = Environment(
# loader=PackageLoader(package_name="github_linter", package_path="."),
# autoescape=select_autoescape(),
# )
# try:
# template = jinja2_env.get_template(f"fixes/{CATEGORY}/pylintrc")
# context = {}
# for key in repo.config[CATEGORY]["pylintrc"]:
# if repo.config[CATEGORY]["pylintrc"]:
# context[key] = repo.config[CATEGORY]["pylintrc"][key]
# new_filecontents = template.render(**context)
# except jinja2.exceptions.TemplateNotFound as template_error:
# logger.error("Failed to load template: {}", template_error)
# commit_url = repo.create_or_update_file(
# filepath=filepath,
# newfile=new_filecontents,
# message=f"github-linter pylintrc module creating {filepath}",
# )
# repo.fix(CATEGORY, f"Created {filepath}, commit url: {commit_url}")
|
<filename>github_linter/tests/pylintrc.py<gh_stars>0
""" checks for dependabot config """
from configparser import ConfigParser, NoOptionError # , NoSectionError
from typing import List, Optional, TypedDict
# from jinja2 import Environment, PackageLoader, select_autoescape
# import jinja2.exceptions
import json5 as json
from loguru import logger
from github_linter.repolinter import RepoLinter
CATEGORY = "pylintrc"
LANGUAGES = ["python"]
# TODO: look in "/<repo.name>/.pylintrc"
class PylintRC(TypedDict):
    """ defines the settings for a pylintrc file """
    # pylint checker names to disable
    disable: List[str]
    # maximum allowed line length, when enforced
    max_line_length: Optional[int]
class DefaultConfig(TypedDict):
    """ config typing for module config """
    # https://pylint.pycqa.org/en/latest/user_guide/run.html
    # candidate file paths to search for a pylintrc, in priority order
    pylintrc_locations: List[str]
    # desired pylintrc settings, or None when not templating a new file
    pylintrc: Optional[PylintRC]
# Module defaults -- presumably merged into repo.config[CATEGORY] by the
# linter framework; confirm against github_linter's config loading.
DEFAULT_CONFIG: DefaultConfig = {
    # documentation for list of locations
    # https://pylint.pycqa.org/en/latest/user_guide/run.html?highlight=pylintrc#command-line-options
    "pylintrc_locations": [
        ".pylintrc",
        "pylintrc",
        # "pyproject.toml" # providing it has at least one tool.pylint. section
        # "setup.cfg" # needs pylint.*
    ],
    "pylintrc": None,
}
def load_pylintrc(repo: RepoLinter, clear_cache: bool = False) -> Optional[ConfigParser]:
    """ grabs the .pylintrc file from the repository

    Tries each configured location in order and parses the first file found.

    :param repo: repository wrapper used to fetch file contents
    :param clear_cache: passed through to bypass the repo file cache
    :return: parsed config, or None if no pylintrc could be loaded
    """
    for filepath in repo.config[CATEGORY]["pylintrc_locations"]:
        contents = repo.cached_get_file(filepath, clear_cache)
        if not contents:
            continue
        config = ConfigParser()
        if not contents.content:
            # NOTE(review): an existing-but-empty file aborts the whole search
            # instead of trying the next location -- confirm this is intended.
            return None
        config.read_string(contents.decoded_content.decode("utf-8"))
        logger.debug("Successfully loaded {}", filepath)
        return config
    return None
def check_max_line_length_configured(repo: RepoLinter) -> None:
    """ checks for the max-line-length setting in .pylintrc

    Warns when no pylintrc is found or the option is missing, and records
    an error when the configured value differs from the expected one.
    """
    # default setting
    # NOTE(review): this tests top-level repo.config for "pylintrc" but then
    # reads repo.config[CATEGORY] -- and DEFAULT_CONFIG nests settings under a
    # "pylintrc" key while `expected` below reads
    # repo.config[CATEGORY]["max_line_length"] directly. These key paths look
    # inconsistent; confirm the intended config schema before changing.
    if "pylintrc" in repo.config:
        if "max_line_length" not in repo.config[CATEGORY]:
            logger.debug("max_line_length not set in config, no need to run.")
            return
    config: Optional[ConfigParser] = load_pylintrc(repo)
    if not config:
        repo.warning(CATEGORY, ".pylintrc not found")
        return
    if "MASTER" not in config.sections():
        logger.debug("Can't find MASTER entry, dumping config")
        logger.debug(json.dumps(config, indent=4, default=str, ensure_ascii=False))
        return
    try:
        linelength = config.get("MASTER", "max-line-length")
    except NoOptionError:
        repo.warning(CATEGORY, "max-line-length not configured")
        return
    expected = repo.config[CATEGORY]["max_line_length"]
    if int(linelength) != int(expected):
        repo.error(
            CATEGORY,
            f"max-line-length wrong, is {linelength}, should be {expected}",
        )
    return
def check_pylintrc(
    repo: RepoLinter,
) -> None:
    """ checks for .pylintrc config

    Warns when the repository has no top-level .pylintrc file.
    """
    if not repo.cached_get_file(".pylintrc"):
        repo.warning(CATEGORY, ".pylintrc not found")
def fix_pylintrc_missing(
    repo: RepoLinter,
) -> None:
    """ if there's no .pylintrc at all, add one

    Currently a no-op beyond logging: the implementation below is
    deliberately commented out until pylint config moves to pyproject.toml
    (see issue ref #73 in the log message).
    """
    logger.error("SKIPPING PYLINTRC UNTIL IT IS MOVED TO PYPROJECT - ref #73 - running in {}", repo.repository.full_name)
    # if not repo.config[CATEGORY]["pylintrc_locations"]:
    #     logger.debug(
    #         "pylintrc_locations has been set to an empty list, bailing on this fix."
    #     )
    #     return
    # if not repo.config[CATEGORY]["pylintrc"]:
    #     logger.debug("pylintrc not configured, bailing on this fix.")
    #     return
    # # check if the pylintrc file exists in any of the check places
    # for filepath in repo.config[CATEGORY]["pylintrc_locations"]:
    #     filecontents = repo.cached_get_file(filepath, clear_cache=True)
    #     if filecontents:
    #         logger.debug("File exists in {}, no action required.", filepath)
    #         return
    # filepath = repo.config[CATEGORY]["pylintrc_locations"][0]
    # logger.debug("Writing pylintrc file at: {}", filepath)
    # # start up jinja2
    # jinja2_env = Environment(
    #     loader=PackageLoader(package_name="github_linter", package_path="."),
    #     autoescape=select_autoescape(),
    # )
    # try:
    #     template = jinja2_env.get_template(f"fixes/{CATEGORY}/pylintrc")
    #     context = {}
    #     for key in repo.config[CATEGORY]["pylintrc"]:
    #         if repo.config[CATEGORY]["pylintrc"]:
    #             context[key] = repo.config[CATEGORY]["pylintrc"][key]
    #     new_filecontents = template.render(**context)
    # except jinja2.exceptions.TemplateNotFound as template_error:
    #     logger.error("Failed to load template: {}", template_error)
    # commit_url = repo.create_or_update_file(
    #     filepath=filepath,
    #     newfile=new_filecontents,
    #     message=f"github-linter pylintrc module creating {filepath}",
    # )
    # repo.fix(CATEGORY, f"Created {filepath}, commit url: {commit_url}")
|
en
| 0.475585
|
checks for dependabot config # , NoSectionError # from jinja2 import Environment, PackageLoader, select_autoescape # import jinja2.exceptions # TODO: look in "/<repo.name>/.pylintrc" defines the settings for a pylintrc file config typing for module config # https://pylint.pycqa.org/en/latest/user_guide/run.html # documentation for list of locations # https://pylint.pycqa.org/en/latest/user_guide/run.html?highlight=pylintrc#command-line-options # "pyproject.toml" # providing it has at least one tool.pylint. section # "setup.cfg" # needs pylint.* grabs the .pylintrc file from the repository checks for the max-line-length setting in .pylintrc # default setting checks for .pylintrc config if there's no .pylintrc at all, add one #73 - running in {}", repo.repository.full_name) # if not repo.config[CATEGORY]["pylintrc_locations"]: # logger.debug( # "pylintrc_locations has been set to an empty list, bailing on this fix." # ) # return # if not repo.config[CATEGORY]["pylintrc"]: # logger.debug("pylintrc not configured, bailing on this fix.") # return # # check if the pylintrc file exists in any of the check places # for filepath in repo.config[CATEGORY]["pylintrc_locations"]: # filecontents = repo.cached_get_file(filepath, clear_cache=True) # if filecontents: # logger.debug("File exists in {}, no action required.", filepath) # return # filepath = repo.config[CATEGORY]["pylintrc_locations"][0] # logger.debug("Writing pylintrc file at: {}", filepath) # # start up jinja2 # jinja2_env = Environment( # loader=PackageLoader(package_name="github_linter", package_path="."), # autoescape=select_autoescape(), # ) # try: # template = jinja2_env.get_template(f"fixes/{CATEGORY}/pylintrc") # context = {} # for key in repo.config[CATEGORY]["pylintrc"]: # if repo.config[CATEGORY]["pylintrc"]: # context[key] = repo.config[CATEGORY]["pylintrc"][key] # new_filecontents = template.render(**context) # except jinja2.exceptions.TemplateNotFound as template_error: # logger.error("Failed to load 
template: {}", template_error) # commit_url = repo.create_or_update_file( # filepath=filepath, # newfile=new_filecontents, # message=f"github-linter pylintrc module creating {filepath}", # ) # repo.fix(CATEGORY, f"Created {filepath}, commit url: {commit_url}")
| 2.252327
| 2
|
.history/routes_20200723152523.py
|
rkustas/taskmanager
| 0
|
6625890
|
from app import app, db
from flask import render_template, redirect, url_for, flash, get_flashed_messages
from models import Task
from datetime import datetime
import forms
# Basic route
@app.route('/')
@app.route('/index')
def index():
tasks = Task.query.all()
return render_template('index.html', tasks=tasks)
@app.route('/add', methods=['GET','POST'])
def add():
form = forms.AddTaskForm()
if form.validate_on_submit():
t = Task(title=form.title.data, date=datetime.utcnow())
db.session.add(t)
db.session.commit()
flash('Task added to the database')
print('A new task was added', form.title.data)
return redirect(url_for('index'))
return render_template('add.html', form=form)
|
from app import app, db
from flask import render_template, redirect, url_for, flash, get_flashed_messages
from models import Task
from datetime import datetime
import forms
# Basic route
@app.route('/')
@app.route('/index')
def index():
    """Render the home page listing every task in the database."""
    all_tasks = Task.query.all()
    return render_template('index.html', tasks=all_tasks)
@app.route('/add', methods=['GET','POST'])
def add():
    """Create a new task from the submitted form.

    GET renders the empty form; a valid POST stores the task with a UTC
    timestamp, flashes a confirmation, and redirects to the task list.
    """
    form = forms.AddTaskForm()
    if form.validate_on_submit():
        task = Task(title=form.title.data, date=datetime.utcnow())
        db.session.add(task)
        db.session.commit()
        flash('Task added to the database')
        # (debug `print` of the submitted title removed -- stdout noise in production)
        return redirect(url_for('index'))
    return render_template('add.html', form=form)
|
en
| 0.810421
|
# Basic route
| 2.314652
| 2
|
tmaps_mtmoran_getoutputinfo.py
|
CI-WATER/tmaps
| 0
|
6625891
|
<reponame>CI-WATER/tmaps<filename>tmaps_mtmoran_getoutputinfo.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
********************************************************************************
* Name: TMAPS ADHydro
* Author: <NAME>
* Created On: May 20, 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
This script uses the methods from frame_processing, render_adhydro, and run_tmachine in order to
process the output of ADHydro and generate a time machine viewer from the specifed results. A user
can specify the inputs to run the script on lines 36-40
'''
import os
import sys
import shutil
import multiprocessing
from datetime import datetime
import render_adhydro
import frame_processing
from PIL import Image
import run_tmachine
time_start = datetime.now()
#------------------------------------------------------------------------------
#main process
#------------------------------------------------------------------------------
output_dir = '/home/nrtaylor/Research/Files_From_MtMoran/Green_River_ADHydro/'
user_parameter = 'meshSurfacewaterDepth'
user_contour = 'blueyellowred'
user_opacity = 1
start_frame = 0
end_frame = 30
if __name__=="__main__":
runtmaps = render_adhydro.Adhydro_Render(output_dir, user_parameter,user_contour,user_opacity, start_frame, end_frame)
runtmaps.output_info()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
********************************************************************************
* Name: TMAPS ADHydro
* Author: <NAME>
* Created On: May 20, 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
This script uses the methods from frame_processing, render_adhydro, and run_tmachine in order to
process the output of ADHydro and generate a time machine viewer from the specifed results. A user
can specify the inputs to run the script on lines 36-40
'''
import os
import sys
import shutil
import multiprocessing
from datetime import datetime
import render_adhydro
import frame_processing
from PIL import Image
import run_tmachine
# Wall-clock start of the run (not currently used further in this script).
time_start = datetime.now()
#------------------------------------------------------------------------------
#main process
#------------------------------------------------------------------------------
# User-editable inputs (see module docstring): ADHydro output location, mesh
# parameter to render, colour ramp name, layer opacity, and frame range.
output_dir = '/home/nrtaylor/Research/Files_From_MtMoran/Green_River_ADHydro/'
user_parameter = 'meshSurfacewaterDepth'
user_contour = 'blueyellowred'
user_opacity = 1
start_frame = 0
end_frame = 30
if __name__=="__main__":
    # Build the renderer and report on the configured output -- presumably
    # prints/exports output metadata; confirm against render_adhydro.
    runtmaps = render_adhydro.Adhydro_Render(output_dir, user_parameter,user_contour,user_opacity, start_frame, end_frame)
    runtmaps.output_info()
|
en
| 0.458711
|
#!/usr/bin/python # -*- coding: utf-8 -*- ******************************************************************************** * Name: TMAPS ADHydro * Author: <NAME> * Created On: May 20, 2015 * Copyright: (c) Brigham Young University 2015 * License: BSD 2-Clause ******************************************************************************** This script uses the methods from frame_processing, render_adhydro, and run_tmachine in order to process the output of ADHydro and generate a time machine viewer from the specifed results. A user can specify the inputs to run the script on lines 36-40 #------------------------------------------------------------------------------ #main process #------------------------------------------------------------------------------
| 1.969196
| 2
|
src/config/api-server/vnc_cfg_api_server/vnc_cfg_api_server.py
|
sasikrishna2014/contrail-controller
| 0
|
6625892
|
<gh_stars>0
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
This is the main module in vnc_cfg_api_server package. It manages interaction
between http/rest, address management, authentication and database interfaces.
"""
from gevent import monkey
monkey.patch_all()
from gevent import hub
# from neutron plugin to api server, the request URL could be large.
# fix the const
import gevent.pywsgi
gevent.pywsgi.MAX_REQUEST_LINE = 65535
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import ConfigParser
import functools
import hashlib
import logging
import logging.config
import signal
import netaddr
import os
import re
import random
import socket
from cfgm_common import jsonutils as json
from provision_defaults import *
import uuid
import copy
from pprint import pformat
from cStringIO import StringIO
from vnc_api.utils import AAA_MODE_VALID_VALUES
# import GreenletProfiler
from cfgm_common import vnc_cgitb
import subprocess
import traceback
from kazoo.exceptions import LockTimeout
from cfgm_common import has_role
from cfgm_common import _obj_serializer_all
from cfgm_common.utils import _DEFAULT_ZK_COUNTER_PATH_PREFIX
from cfgm_common.utils import _DEFAULT_ZK_LOCK_PATH_PREFIX
from cfgm_common import is_uuid_like
from cfgm_common import SG_NO_RULE_FQ_NAME, SG_NO_RULE_NAME, UUID_PATTERN
from cfgm_common.uve.vnc_api.ttypes import VncApiLatencyStats, VncApiLatencyStatsLog
logger = logging.getLogger(__name__)
import time
import requests
import xml.etree.ElementTree as etree
from functools import partial
"""
Following is needed to silence warnings on every request when keystone
auth_token middleware + Sandesh is used. Keystone or Sandesh alone
do not produce these warnings.
Exception AttributeError: AttributeError(
"'_DummyThread' object has no attribute '_Thread__block'",)
in <module 'threading' from '/usr/lib64/python2.7/threading.pyc'> ignored
See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
for more information.
"""
import threading
threading._DummyThread._Thread__stop = lambda x: 42
CONFIG_VERSION = '1.0'
import bottle
import utils
import context
from context import get_request, get_context, set_context, use_context
from context import ApiContext
from context import is_internal_request
import vnc_cfg_types
from vnc_db import VncDbClient
import cfgm_common
from cfgm_common import ignore_exceptions
from cfgm_common.uve.vnc_api.ttypes import VncApiCommon, VncApiConfigLog,\
VncApiDebug, VncApiInfo, VncApiNotice, VncApiError
from cfgm_common.uve.vnc_api.ttypes import FabricJobExecution, FabricJobUve, \
PhysicalRouterJobExecution, PhysicalRouterJobUve
from cfgm_common import illegal_xml_chars_RE
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType,\
NodeTypeNames, INSTANCE_ID_DEFAULT, TagTypeNameToId,\
TAG_TYPE_NOT_UNIQUE_PER_OBJECT, TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP,\
POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT, SECURITY_OBJECT_TYPES
from provision_defaults import Provision
from vnc_quota import *
from vnc_api.gen.resource_xsd import *
from vnc_api.gen.resource_common import *
from vnc_api.gen.vnc_api_client_gen import all_resource_type_tuples
import cfgm_common
from cfgm_common.utils import cgitb_hook
from cfgm_common.rest import LinkObject, hdr_server_tenant
from cfgm_common.exceptions import *
from cfgm_common.vnc_extensions import ExtensionManager
import vnc_addr_mgmt
import vnc_auth
import vnc_auth_keystone
import vnc_perms
import vnc_rbac
from cfgm_common.uve.cfgm_cpuinfo.ttypes import ModuleCpuState, ModuleCpuStateTrace
from cfgm_common.buildinfo import build_info
from cfgm_common.vnc_api_stats import log_api_stats
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
# from gen_py.vnc_api.ttypes import *
import netifaces
from pysandesh.connection_info import ConnectionState
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from sandesh.traces.ttypes import RestApiTrace
from vnc_bottle import get_bottle_server
from cfgm_common.vnc_greenlets import VncGreenlet
# Table of non-CRUD "action" endpoints exposed by the API server.  Each entry
# maps a URI + HTTP verb to the name of the VncApiServer method implementing
# it; VncApiServer.__new__ iterates this list and registers one bottle route
# per entry (default verb is POST when 'method' is absent).
_ACTION_RESOURCES = [
    {'uri': '/prop-collection-get', 'link_name': 'prop-collection-get',
     'method': 'GET', 'method_name': 'prop_collection_http_get'},
    {'uri': '/prop-collection-update', 'link_name': 'prop-collection-update',
     'method': 'POST', 'method_name': 'prop_collection_http_post'},
    {'uri': '/ref-update', 'link_name': 'ref-update',
     'method': 'POST', 'method_name': 'ref_update_http_post'},
    {'uri': '/ref-relax-for-delete', 'link_name': 'ref-relax-for-delete',
     'method': 'POST', 'method_name': 'ref_relax_for_delete_http_post'},
    {'uri': '/fqname-to-id', 'link_name': 'name-to-id',
     'method': 'POST', 'method_name': 'fq_name_to_id_http_post'},
    {'uri': '/id-to-fqname', 'link_name': 'id-to-name',
     'method': 'POST', 'method_name': 'id_to_fq_name_http_post'},
    {'uri': '/useragent-kv', 'link_name': 'useragent-keyvalue',
     'method': 'POST', 'method_name': 'useragent_kv_http_post'},
    {'uri': '/db-check', 'link_name': 'database-check',
     'method': 'POST', 'method_name': 'db_check'},
    {'uri': '/fetch-records', 'link_name': 'fetch-records',
     'method': 'POST', 'method_name': 'fetch_records'},
    {'uri': '/start-profile', 'link_name': 'start-profile',
     'method': 'POST', 'method_name': 'start_profile'},
    {'uri': '/stop-profile', 'link_name': 'stop-profile',
     'method': 'POST', 'method_name': 'stop_profile'},
    {'uri': '/list-bulk-collection', 'link_name': 'list-bulk-collection',
     'method': 'POST', 'method_name': 'list_bulk_collection_http_post'},
    {'uri': '/obj-perms', 'link_name': 'obj-perms',
     'method': 'GET', 'method_name': 'obj_perms_http_get'},
    {'uri': '/chown', 'link_name': 'chown',
     'method': 'POST', 'method_name': 'obj_chown_http_post'},
    {'uri': '/chmod', 'link_name': 'chmod',
     'method': 'POST', 'method_name': 'obj_chmod_http_post'},
    {'uri': '/aaa-mode', 'link_name': 'aaa-mode',
     'method': 'PUT', 'method_name': 'aaa_mode_http_put'},
    # /obj-cache is registered for both GET and POST, backed by the same
    # handler.
    {'uri': '/obj-cache', 'link_name': 'obj-cache',
     'method': 'GET', 'method_name': 'dump_cache'},
    {'uri': '/obj-cache', 'link_name': 'obj-cache',
     'method': 'POST', 'method_name': 'dump_cache'},
    {'uri': '/execute-job', 'link_name': 'execute-job',
     'method': 'POST', 'method_name': 'execute_job_http_post'},
]

# Properties that must be present on CREATE; consulted by
# VncApiServer._check_mandatory_props_list() when a schema-required
# property is missing from a create request.
_MANDATORY_PROPS = [
    'loadbalancer_healthmonitor_properties',
]
# Bottle error handlers: return the raw error body produced by the raising
# code instead of bottle's default HTML error page.  All of them except
# error_412 are wired into api_bottle.error_handler inside
# VncApiServer.__new__; error_412 is registered via the decorator.
def error_400(err):
    return err.body
# end error_400

def error_403(err):
    return err.body
# end error_403

def error_404(err):
    return err.body
# end error_404

def error_405(err):
    return err.body
# end error_405

def error_409(err):
    return err.body
# end error_409

@bottle.error(412)
def error_412(err):
    return err.body
# end error_412

def error_500(err):
    return err.body
# end error_500

def error_503(err):
    return err.body
# end error_503
class VncApiServer(object):
    """
    This is the manager class co-ordinating all classes present in the package
    """
    # Characters never allowed inside object names (checked by validators).
    _INVALID_NAME_CHARS = set(':')
    # Object types for which a default instance is auto-generated; both the
    # underscore and hyphen spellings of the type name are accepted.
    _GENERATE_DEFAULT_INSTANCE = [
        'namespace',
        'project',
        'virtual_network', 'virtual-network',
        'network_ipam', 'network-ipam',
    ]
    def __new__(cls, *args, **kwargs):
        """Create the server object, attach its bottle application and
        register the homepage route, the HTTP error handlers, the generated
        per-resource CRUD routes and every action route declared in the
        module-level _ACTION_RESOURCES table."""
        obj = super(VncApiServer, cls).__new__(cls, *args, **kwargs)
        obj.api_bottle = bottle.Bottle()

        obj.route('/', 'GET', obj.homepage_http_get)
        # map HTTP status codes to the module-level handlers so errors are
        # returned as their raw body rather than bottle's HTML page
        obj.api_bottle.error_handler = {
            400: error_400,
            403: error_403,
            404: error_404,
            405: error_405,
            409: error_409,
            500: error_500,
            503: error_503,
        }

        cls._generate_resource_crud_methods(obj)
        cls._generate_resource_crud_uri(obj)
        # register every action endpoint; verb defaults to POST
        for act_res in _ACTION_RESOURCES:
            http_method = act_res.get('method', 'POST')
            method_name = getattr(obj, act_res['method_name'])
            obj.route(act_res['uri'], http_method, method_name)
        return obj
    # end __new__
    @classmethod
    def _validate_complex_type(cls, dict_cls, dict_body):
        """Recursively validate *dict_body* against the generated schema
        class *dict_cls*.

        Raises ValueError on unknown fields, non-list values for array
        fields, or invalid simple-typed values; a None body is accepted
        silently.
        """
        if dict_body is None:
            return
        for key, value in dict_body.items():
            if key not in dict_cls.attr_fields:
                raise ValueError('class %s does not have field %s' % (
                    str(dict_cls), key))
            attr_type_vals = dict_cls.attr_field_type_vals[key]
            attr_type = attr_type_vals['attr_type']
            restrictions = attr_type_vals['restrictions']
            is_array = attr_type_vals.get('is_array', False)
            if value is None:
                continue
            if is_array:
                if not isinstance(value, list):
                    raise ValueError('Field %s must be a list. Received value: %s'
                                     % (key, str(value)))
                values = value
            else:
                # normalize scalars to a one-element list so both cases are
                # handled by the same loops below
                values = [value]
            if attr_type_vals['is_complex']:
                attr_cls = cfgm_common.utils.str_to_class(attr_type, __name__)
                for item in values:
                    # AllowedAddressPair gets an extra prefix-length check
                    if attr_type == 'AllowedAddressPair':
                        cls._validate_allowed_address_pair_prefix_len(item)
                    cls._validate_complex_type(attr_cls, item)
            else:
                simple_type = attr_type_vals['simple_type']
                for item in values:
                    cls._validate_simple_type(key, attr_type,
                                              simple_type, item,
                                              restrictions)
    # end _validate_complex_type
@classmethod
def _validate_allowed_address_pair_prefix_len(cls, value):
'''Do not allow configuration of AAP with
IPv4 prefix length less than 24 and 120 for IPv6.
LP #1720118
'''
if value['address_mode'] == 'active-standby':
ip_net_family = netaddr.IPNetwork(value['ip']['ip_prefix']).version
if ip_net_family == 6 and value['ip']['ip_prefix_len'] < 120:
raise ValueError('IPv6 Prefix length lesser than 120 is'
' is not acceptable')
if ip_net_family == 4 and value['ip']['ip_prefix_len'] < 24:
raise ValueError('IPv4 Prefix length lesser than 24'
' is not acceptable')
# end _validate_allowed_address_pair_prefix_len
@classmethod
def _validate_communityattribute_type(cls, value):
poss_values = ["no-export",
"accept-own",
"no-advertise",
"no-export-subconfed",
"no-reoriginate"]
if value in poss_values:
return
res = re.match('[0-9]+:[0-9]+', value)
if res is None:
raise ValueError('Invalid community format %s. '
'Change to \'number:number\''
% value)
asn = value.split(':')
if int(asn[0]) > 65535:
raise ValueError('Out of range ASN value %s. '
'ASN values cannot exceed 65535.'
% value)
@classmethod
def _validate_serviceinterface_type(cls, value):
poss_values = ["management",
"left",
"right"]
if value in poss_values:
return
res = re.match('other[0-9]*', value)
if res is None:
raise ValueError('Invalid service interface type %s. '
'Valid values are: management|left|right|other[0-9]*'
% value)
    def validate_execute_job_input_params(self, request_params):
        """Validate the payload of an /execute-job request.

        Ensures one of job_template_id / job_template_fq_name is present and
        resolvable, filling the missing counterpart into *request_params*
        from the DB, and that the optional params.device_list is a list of
        valid uuid strings.

        Returns the device_list (or None when absent); raises HttpError
        400 on malformed input and 404 on unknown ids/fq_names.
        """
        device_list = None
        job_template_id = request_params.get('job_template_id')
        job_template_fq_name = request_params.get('job_template_fq_name')
        if not (job_template_id or job_template_fq_name):
            err_msg = "Either job_template_id or job_template_fq_name" \
                      " required in request"
            raise cfgm_common.exceptions.HttpError(400, err_msg)
        # check if the job template id is a valid uuid
        if job_template_id:
            if self.invalid_uuid(job_template_id):
                msg = 'Invalid job-template uuid type %s. uuid type required' \
                      % job_template_id
                raise cfgm_common.exceptions.HttpError(400, msg)
            try:
                # resolve to fq_name so downstream code has both identifiers
                job_template_fqname = self._db_conn.uuid_to_fq_name(
                    job_template_id)
                request_params['job_template_fq_name'] = job_template_fqname
            except NoIdError as no_id_exec:
                raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
            except Exception as e:
                msg = "Error while reading job_template_id: " + str(e)
                raise cfgm_common.exceptions.HttpError(400, msg)
        else:
            # check if the job template fqname is a valid fq_name
            try:
                job_template_id = self._db_conn.fq_name_to_uuid(
                    "job_template", job_template_fq_name)
                request_params['job_template_id'] = job_template_id
            except NoIdError as no_id_exec:
                raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
            except Exception as e:
                msg = "Error while reading job_template_fqname: " + str(e)
                raise cfgm_common.exceptions.HttpError(400, msg)
        extra_params = request_params.get('params')
        if extra_params is not None:
            device_list = extra_params.get('device_list')
            if device_list:
                if not isinstance(device_list, list):
                    err_msg = "malformed request param: device_list, " \
                              "expects list"
                    raise cfgm_common.exceptions.HttpError(400, err_msg)
                # NOTE: basestring is a Python 2 builtin
                for device_id in device_list:
                    if not isinstance(device_id, basestring):
                        err_msg = "malformed request param: device_list, " \
                                  "expects list of string device_uuids," \
                                  " found device_uuid %s" % device_id
                        raise cfgm_common.exceptions.HttpError(400, err_msg)
                    # check if the device id passed is a valid uuid
                    if self.invalid_uuid(device_id):
                        msg = 'Invalid device uuid type %s.' \
                              ' uuid type required' % device_id
                        raise cfgm_common.exceptions.HttpError(400, msg)
        return device_list
    # end validate_execute_job_input_params
def job_mgr_signal_handler(self, signalnum, frame):
try:
#get the child process id that called the signal handler
pid = os.waitpid(-1, os.WNOHANG)
signal_var = self._job_mgr_running_instances.get(str(pid[0]))
if not signal_var:
self.config_log("job mgr process %s not found in the instance "
"map!" % str(pid), level=SandeshLevel.SYS_ERR)
return
msg = "Entered job_mgr_signal_handler for: %s" % signal_var
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
# update job manager execution status uve
elapsed_time = time.time() - signal_var.get('start_time')
status = "UNKNOWN"
if signal_var.get('fabric_name') is not "__DEFAULT__":
try:
# read the job object log for a particular job to check if
# it succeeded or not
jobObjLog_payload = {
'start_time': 'now-%ds' % (elapsed_time),
'end_time': 'now',
'select_fields': ['MessageTS', 'Messagetype', 'ObjectLog'],
'table': 'ObjectJobExecutionTable',
'where': [
[
{
'name': 'ObjectId',
'value': '%s:SUCCESS' % signal_var.get('exec_id'),
'op': 1
}
]
]
}
url = "http://localhost:8081/analytics/query"
resp = requests.post(url, json=jobObjLog_payload)
if resp.status_code == 200:
JobLog = resp.json().get('value')
if not JobLog:
status = 'FAILURE'
else:
status = 'SUCCESS'
else:
self.config_log("POST request to query job object log "
"failed with error %s" %
resp.status_code,
level=SandeshLevel.SYS_ERR)
except (requests.ConnectionError, requests.ConnectTimeout,
requests.HTTPError, requests.Timeout) as ex:
self.config_log("POST request to query job object log "
"failed with error %s" % str(ex),
level=SandeshLevel.SYS_ERR)
pass
#send uve irrespective of the job log query
# success/failure with job status
job_execution_data = FabricJobExecution(
name=signal_var.get('fabric_name'),
job_status=status,
percentage_completed=100)
job_execution_uve = FabricJobUve(data=job_execution_data,
sandesh=self._sandesh)
job_execution_uve.send(sandesh=self._sandesh)
try:
# read the last PRouter state for all Prouetrs
payload = {
'sort':1,
'start_time': 'now-%ds' % (elapsed_time),
'sort_fields': ['MessageTS'],
'end_time': 'now',
'select_fields': ['MessageTS', 'Messagetype', 'ObjectLog'],
'table': 'ObjectJobExecutionTable',
'where': [
[
{
'name': 'Messagetype',
'value': 'PRouterOnboardingLog',
'op': 1
},
{
'name': 'ObjectId',
'value': '%s' % signal_var.get('exec_id'),
'op': 1
}
]
]
}
url = "http://localhost:8081/analytics/query"
resp = requests.post(url, json=payload)
if resp.status_code == 200:
PRouterOnboardingLog = resp.json().get('value')
for PRObjectLog in PRouterOnboardingLog:
resp = PRObjectLog.get('ObjectLog')
xmlresp = etree.fromstring(resp)
for ele in xmlresp.iter():
if ele.tag == 'name':
device_fqname = ele.text
if ele.tag == 'onboarding_state':
onboarding_state = ele.text
if device_fqname and onboarding_state:
prouter_uve_name = device_fqname + ":" + \
signal_var.get('fabric_name')
prouter_job_data = PhysicalRouterJobExecution(
name=prouter_uve_name,
execution_id=signal_var.get('exec_id'),
job_start_ts=int(round(signal_var.get('start_time') * 1000)),
prouter_state=onboarding_state
)
prouter_job_uve = PhysicalRouterJobUve(
data=prouter_job_data, sandesh=self._sandesh)
prouter_job_uve.send(sandesh=self._sandesh)
else:
self.config_log("POST request to query Prouter job "
"object log failed with error %s" %
resp.status_code,
level=SandeshLevel.SYS_ERR)
except (requests.ConnectionError, requests.ConnectTimeout,
requests.HTTPError, requests.Timeout) as ex:
self.config_log("POST request to query Prouter job object "
"log failed with error %s" % str(ex),
level=SandeshLevel.SYS_ERR)
finally:
#remove the pid entry of the processed job_mgr process
del self._job_mgr_running_instances[str(pid[0])]
except OSError as process_error:
self.config_log("Couldn retrieve the child process id. OS call "
"returned with error %s" % str(process_error),
level=SandeshLevel.SYS_ERR)
def execute_job_http_post(self):
''' Payload of execute_job
job_template_id (Mandatory if no job_template_fq_name): <uuid> of
the created job_template
job_template_fq_name (Mandatory if no job_template_id): fqname in
the format: ["<global-system-config-name>",
"<name of the job-template>"]
input (Type json): Input Schema of the playbook under the
job_template_id
params (Type json): Extra_params for the job_manager
(Eg. device_list)
E.g. Payload:
{
"job_template_id": "<uuid>",
"params": {
"device_list": ["<device_uuid1>", "<device_uuid2>", ....
"<device_uuidn>"]
}
}
'''
try:
if not self._args.enable_fabric_ansible:
err_msg = "Fabric ansible job manager is disabled. " \
"Please enable it by setting the " \
"'enable_fabric_ansible' to True in the conf file"
raise cfgm_common.exceptions.HttpError(405, err_msg)
self.config_log("Entered execute-job",
level=SandeshLevel.SYS_NOTICE)
request_params = get_request().json
msg = "Job Input %s " % json.dumps(request_params)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
device_list = self.validate_execute_job_input_params(
request_params)
# TODO - pass the job manager config file from api server config
# read the device object and pass the necessary data to the job
if device_list:
self.read_device_data(device_list, request_params)
else:
self.read_fabric_data(request_params)
# generate the job execution id
execution_id = uuid.uuid4()
request_params['job_execution_id'] = str(execution_id)
# get the auth token
auth_token = get_request().get_header('X-Auth-Token')
request_params['auth_token'] = auth_token
# pass the required config args to job manager
job_args = {'collectors': self._args.collectors,
'fabric_ansible_conf_file':
self._args.fabric_ansible_conf_file
}
request_params['args'] = json.dumps(job_args)
fabric_job_name = request_params.get('job_template_fq_name')
fabric_job_name.insert(0, request_params.get('fabric_fq_name'))
fabric_job_uve_name = ':'.join(map(str, fabric_job_name))
# create job manager fabric execution status uve
if request_params.get('fabric_fq_name') is not "__DEFAULT__":
job_execution_data = FabricJobExecution(
name=fabric_job_uve_name,
execution_id=request_params.get('job_execution_id'),
job_start_ts=int(round(time.time() * 1000)),
job_status="STARTING",
percentage_completed=0.0
)
job_execution_uve = FabricJobUve(data=job_execution_data,
sandesh=self._sandesh)
job_execution_uve.send(sandesh=self._sandesh)
if device_list:
for device_id in device_list:
device_fqname = request_params.get(
'device_json').get(device_id).get('device_fqname')
device_fqname = ':'.join(map(str, device_fqname))
prouter_uve_name = device_fqname + ":" + \
fabric_job_uve_name
prouter_job_data = PhysicalRouterJobExecution(
name=prouter_uve_name,
execution_id=request_params.get('job_execution_id'),
job_start_ts=int(round(time.time() * 1000))
)
prouter_job_uve = PhysicalRouterJobUve(
data=prouter_job_data, sandesh=self._sandesh)
prouter_job_uve.send(sandesh=self._sandesh)
start_time = time.time()
signal_var = {
'fabric_name': fabric_job_uve_name ,
'start_time': start_time ,
'exec_id': request_params.get('job_execution_id')
}
# handle process exit signal
signal.signal(signal.SIGCHLD, self.job_mgr_signal_handler)
# create job manager subprocess
job_mgr_path = os.path.dirname(__file__) + "/../job_manager/job_mgr.py"
job_process = subprocess.Popen(["python", job_mgr_path, "-i",
json.dumps(request_params)],
cwd="/", close_fds=True)
self._job_mgr_running_instances[str(job_process.pid)] = signal_var
self.config_log("Created job manager process. Execution id: %s" %
execution_id,
level=SandeshLevel.SYS_NOTICE)
return {'job_execution_id': str(execution_id),
'job_manager_process_id': str(job_process.pid)}
except cfgm_common.exceptions.HttpError as e:
raise
except Exception as e:
err_msg = "Error while executing job request: %s" % repr(e)
raise cfgm_common.exceptions.HttpError(500, err_msg)
def read_fabric_data(self, request_params):
if request_params.get('input') is None:
err_msg = "Missing job input"
raise cfgm_common.exceptions.HttpError(400, err_msg)
# get the fabric fq_name from the database if fabric_uuid is provided
fabric_fq_name = None
if request_params.get('input').get('fabric_uuid'):
fabric_uuid = request_params.get('input').get('fabric_uuid')
try:
fabric_fq_name = self._db_conn.uuid_to_fq_name(fabric_uuid)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
elif request_params.get('input').get('fabric_fq_name'):
fabric_fq_name = request_params.get('input').get('fabric_fq_name')
else:
if "device_deletion_template" in request_params.get(
'job_template_fq_name'):
fabric_fq_name = "__DEFAULT__"
else:
err_msg = "Missing fabric details in the job input"
raise cfgm_common.exceptions.HttpError(400, err_msg)
if fabric_fq_name:
fabric_fq_name_str = ':'.join(map(str, fabric_fq_name))
request_params['fabric_fq_name'] = fabric_fq_name_str
    def read_device_data(self, device_list, request_params):
        """Read each physical-router in *device_list* from the DB and attach
        its connection details to *request_params*.

        Populates request_params['device_json'] with, per device: management
        IP, fq_name, optional credentials and family/vendor/product fields;
        also sets request_params['fabric_fq_name'] from the device's first
        fabric ref.  Raises HttpError 404 for unknown ids and 500 on DB
        read failures.
        """
        device_data = dict()
        for device_id in device_list:
            db_conn = self._db_conn
            try:
                (ok, result) = db_conn.dbe_read(
                    "physical-router", device_id,
                    ['physical_router_user_credentials',
                     'physical_router_management_ip', 'fq_name',
                     'physical_router_device_family',
                     'physical_router_vendor_name',
                     'physical_router_product_name',
                     'fabric_refs'])
                if not ok:
                    self.config_object_error(device_id, None,
                                             "physical-router ",
                                             'execute_job', result)
                    raise cfgm_common.exceptions.HttpError(500, result)
            except NoIdError as e:
                raise cfgm_common.exceptions.HttpError(404, str(e))
            device_json = {"device_management_ip": result[
                'physical_router_management_ip']}
            device_json.update({"device_fqname": result['fq_name']})
            user_cred = result.get('physical_router_user_credentials')
            if user_cred:
                device_json.update({"device_username": user_cred['username']})
                # NOTE(review): password is forwarded as stored in the DB --
                # presumably encrypted; confirm before adding any logging.
                device_json.update({"device_password":
                                        user_cred['password']})
            device_family = result.get("physical_router_device_family")
            if device_family:
                device_json.update({"device_family": device_family})
            device_vendor_name = result.get("physical_router_vendor_name")
            if device_vendor_name:
                device_json.update({"device_vendor": device_vendor_name})
            device_product_name = result.get("physical_router_product_name")
            if device_product_name:
                device_json.update({"device_product": device_product_name})
            device_data.update({device_id: device_json})
            fabric_refs = result.get('fabric_refs')
            if fabric_refs and len(fabric_refs) > 0:
                # the last device in the list with a fabric ref wins
                fabric_fq_name = result.get('fabric_refs')[0].get('to')
                fabric_fq_name_str = ':'.join(map(str, fabric_fq_name))
                request_params['fabric_fq_name'] = fabric_fq_name_str
        if len(device_data) > 0:
            request_params.update({"device_json": device_data})
    # end read_device_data
    @classmethod
    def _validate_simple_type(cls, type_name, xsd_type, simple_type, value, restrictions=None):
        """Validate (and possibly coerce) a simple-typed value.

        Returns the value -- coerced to int for integer xsd types -- or None
        when *value* is None; raises ValueError on any violation.
        NOTE: relies on the Python 2 builtins `long` and `basestring`.
        """
        if value is None:
            return
        elif xsd_type in ('unsignedLong', 'integer'):
            if not isinstance(value, (int, long)):
                # If value is not an integer, then try to convert it to integer
                try:
                    value = int(value)
                except (TypeError, ValueError):
                    raise ValueError('%s: integer value expected instead of %s' % (
                        type_name, value))
            if restrictions:
                # numeric restrictions are an inclusive [min, max] pair
                if not (int(restrictions[0]) <= value <= int(restrictions[1])):
                    raise ValueError('%s: value must be between %s and %s' % (
                        type_name, restrictions[0], restrictions[1]))
        elif xsd_type == 'boolean':
            if not isinstance(value, bool):
                raise ValueError('%s: true/false expected instead of %s' % (
                    type_name, value))
        elif xsd_type == 'string' and simple_type == 'CommunityAttribute':
            cls._validate_communityattribute_type(value)
        elif xsd_type == 'string' and simple_type == 'ServiceInterfaceType':
            cls._validate_serviceinterface_type(value)
        else:
            if not isinstance(value, basestring):
                raise ValueError('%s: string value expected instead of %s' % (
                    type_name, value))
            # string restrictions are an enumeration of allowed values
            if restrictions and value not in restrictions:
                raise ValueError('%s: value must be one of %s' % (
                    type_name, str(restrictions)))
        return value
    # end _validate_simple_type
    def _check_mandatory_props_list(self, prop_name):
        # True when prop_name must always be supplied on CREATE
        # (see the module-level _MANDATORY_PROPS list).
        return prop_name in _MANDATORY_PROPS
    # end _check_mandatory_props_list
    def _validate_props_in_request(self, resource_class, obj_dict, operation):
        """Validate every schema property present in *obj_dict* for the
        given *resource_class*.

        Returns (True, '') on success or (False, error_message) at the first
        violation.  Simple-typed values may be coerced in place (e.g. string
        to int).
        """
        for prop_name in resource_class.prop_fields:
            prop_field_types = resource_class.prop_field_types[prop_name]
            is_simple = not prop_field_types['is_complex']
            prop_type = prop_field_types['xsd_type']
            restrictions = prop_field_types['restrictions']
            simple_type = prop_field_types['simple_type']
            # NOTE(review): is_list_prop/is_map_prop are computed but never
            # used, so list/map-wrapped complex values fall into the final
            # error branch below -- verify intent.
            is_list_prop = prop_name in resource_class.prop_list_fields
            is_map_prop = prop_name in resource_class.prop_map_fields
            prop_value = obj_dict.get(prop_name)
            if not prop_value:
                # absent property: only an error on CREATE when the schema
                # marks it required AND it is in the mandatory list
                if operation == 'CREATE' and (
                    prop_field_types['required'] == 'required'):
                    if self._check_mandatory_props_list(prop_name):
                        err_msg = '%s property is missing' % prop_name
                        return False, err_msg
                continue
            if is_simple:
                try:
                    obj_dict[prop_name] = self._validate_simple_type(prop_name,
                                              prop_type, simple_type,
                                              prop_value, restrictions)
                except Exception as e:
                    err_msg = 'Error validating property ' + str(e)
                    return False, err_msg
                else:
                    continue
            prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
            if isinstance(prop_value, dict):
                try:
                    self._validate_complex_type(prop_cls, prop_value)
                except Exception as e:
                    err_msg = 'Error validating property %s value %s ' % (
                        prop_name, prop_value)
                    err_msg += str(e)
                    return False, err_msg
            else:  # complex-type + value isn't dict or wrapped in list or map
                err_msg = 'Error in property %s type %s value of %s ' % (
                    prop_name, prop_cls, prop_value)
                return False, err_msg
        # end for all properties
        return True, ''
    # end _validate_props_in_request
    def _validate_refs_in_request(self, resource_class, obj_dict):
        """Validate the 'attr' payload of every reference in *obj_dict*
        against its generated link-attribute class.

        Returns (True, '') on success or (False, error_message) at the
        first invalid reference.
        """
        for ref_name in resource_class.ref_fields:
            ref_fld_types_list = list(resource_class.ref_field_types[ref_name])
            ref_link_type = ref_fld_types_list[1]
            # references without link attributes have nothing to validate
            if ref_link_type == 'None':
                continue
            attr_cls = cfgm_common.utils.str_to_class(ref_link_type, __name__)
            for ref_dict in obj_dict.get(ref_name) or []:
                try:
                    self._validate_complex_type(attr_cls, ref_dict['attr'])
                except Exception as e:
                    err_msg = 'Error validating reference %s value %s ' \
                              % (ref_name, ref_dict)
                    err_msg += str(e)
                    return False, err_msg
        return True, ''
    # end _validate_refs_in_request
    def _validate_perms_in_request(self, resource_class, obj_type, obj_dict):
        """Verify the caller may link to every referenced object in
        *obj_dict*; raises HttpError when a link permission check fails.
        """
        for ref_name in resource_class.ref_fields:
            for ref in obj_dict.get(ref_name) or []:
                try:
                    ref_uuid = ref['uuid']
                except KeyError:
                    # ref_name ends in '_refs'; strip the suffix to obtain
                    # the referenced object type for the fq_name lookup
                    ref_uuid = self._db_conn.fq_name_to_uuid(ref_name[:-5],
                                                             ref['to'])
                (ok, status) = self._permissions.check_perms_link(
                    get_request(), ref_uuid)
                if not ok:
                    (code, err_msg) = status
                    raise cfgm_common.exceptions.HttpError(code, err_msg)
    # end _validate_perms_in_request
def _validate_resource_type(self, type):
try:
r_class = self.get_resource_class(type)
return r_class.resource_type, r_class
except TypeError:
raise cfgm_common.exceptions.HttpError(
404, "Resource type '%s' not found" % type)
# end _validate_resource_type
    def _ensure_services_conn(
            self, api_name, obj_type, obj_uuid=None, obj_fq_name=None):
        """Refuse state-changing operations when backend services are
        unhealthy.

        Raises HttpError 503 when the zookeeper session is down, and 500
        when too many updates are already pending towards rabbitmq.
        """
        # If not connected to zookeeper do not allow operations that
        # causes the state change
        if not self._db_conn._zk_db.is_connected():
            errmsg = 'No connection to zookeeper.'
            fq_name_str = ':'.join(obj_fq_name or [])
            self.config_object_error(
                obj_uuid, fq_name_str, obj_type, api_name, errmsg)
            raise cfgm_common.exceptions.HttpError(503, errmsg)

        # If there are too many pending updates to rabbit, do not allow
        # operations that cause state change
        npending = self._db_conn.dbe_oper_publish_pending()
        if (npending >= int(self._args.rabbit_max_pending_updates)):
            err_str = str(MaxRabbitPendingError(npending))
            raise cfgm_common.exceptions.HttpError(500, err_str)
    # end _ensure_services_conn
def undo(self, result, obj_type, id=None, fq_name=None, counter=None, value=0):
(code, msg) = result
if counter:
counter = counter + value
get_context().invoke_undo(code, msg, self.config_log)
failed_stage = get_context().get_state()
self.config_object_error(
id, fq_name, obj_type, failed_stage, msg)
# end undo
# http_resource_<oper> - handlers invoked from
# a. bottle route (on-the-wire) OR
# b. internal requests
# using normalized get_request() from ApiContext
@log_api_stats
def http_resource_create(self, obj_type):
    """Handle POST of a new resource (REST and internal callers).

    Pipeline: visibility check -> extension pre-create hook ->
    property/reference validation -> optional pending-create (202) ->
    pre_dbe_alloc -> common POST handling -> parent/permission checks ->
    stateful create (id alloc, quota check, DB create, post hooks).
    Failures during the stateful phase are rolled back via undo().
    Returns {resource_type: response body}; raises HttpError otherwise.
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    obj_dict = get_request().json[resource_type]

    # check visibility
    user_visible = (obj_dict.get('id_perms') or {}).get('user_visible', True)
    if not user_visible and not self.is_admin_request():
        result = 'This object is not visible by users'
        self.config_object_error(None, None, obj_type, 'http_post', result)
        raise cfgm_common.exceptions.HttpError(400, result)

    self._post_validate(obj_type, obj_dict=obj_dict)
    fq_name = obj_dict['fq_name']
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_create' %(obj_type), obj_dict)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        # extension errors are logged but never block the create
        err_msg = 'In pre_%s_create an extension had error for %s' \
                  %(obj_type, obj_dict)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)

    # properties validator
    ok, result = self._validate_props_in_request(r_class,
                                                 obj_dict, operation='CREATE')
    if not ok:
        result = 'Bad property in create: ' + result
        raise cfgm_common.exceptions.HttpError(400, result)

    # references validator
    ok, result = self._validate_refs_in_request(r_class, obj_dict)
    if not ok:
        result = 'Bad reference in create: ' + result
        raise cfgm_common.exceptions.HttpError(400, result)

    # Can abort resource creation and return 202 status code
    get_context().set_state('PENDING_DBE_CREATE')
    ok, result = r_class.pending_dbe_create(obj_dict)
    if not ok:
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)
    if ok and isinstance(result, tuple) and result[0] == 202:
        # Creation accepted but not applied, pending delete return 202 HTTP
        # OK code to aware clients
        pending_obj_dict = result[1]
        bottle.response.status = 202
        rsp_body = {}
        rsp_body['fq_name'] = pending_obj_dict['fq_name']
        rsp_body['uuid'] = pending_obj_dict['uuid']
        rsp_body['name'] = pending_obj_dict['fq_name'][-1]
        rsp_body['href'] = self.generate_url(resource_type,
                                             pending_obj_dict['uuid'])
        rsp_body['parent_type'] = pending_obj_dict['parent_type']
        rsp_body['parent_uuid'] = pending_obj_dict['parent_uuid']
        rsp_body['parent_href'] = self.generate_url(
            pending_obj_dict['parent_type'],pending_obj_dict['parent_uuid'])
        return {resource_type: rsp_body}

    get_context().set_state('PRE_DBE_ALLOC')
    # type-specific hook
    ok, result = r_class.pre_dbe_alloc(obj_dict)
    if not ok:
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    # common handling for all resource create
    (ok, result) = self._post_common(obj_type, obj_dict)
    if not ok:
        (code, msg) = result
        fq_name_str = ':'.join(obj_dict.get('fq_name', []))
        self.config_object_error(None, fq_name_str, obj_type, 'http_post', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)
    uuid_in_req = result

    name = obj_dict['fq_name'][-1]
    fq_name = obj_dict['fq_name']

    db_conn = self._db_conn

    # if client gave parent_type of config-root, ignore and remove
    if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
        del obj_dict['parent_type']

    parent_class = None
    if 'parent_type' in obj_dict:
        # non config-root child, verify parent exists
        parent_res_type, parent_class = self._validate_resource_type(
            obj_dict['parent_type'])
        parent_obj_type = parent_class.object_type
        parent_res_type = parent_class.resource_type
        parent_fq_name = obj_dict['fq_name'][:-1]
        try:
            parent_uuid = self._db_conn.fq_name_to_uuid(parent_obj_type,
                                                        parent_fq_name)
            (ok, status) = self._permissions.check_perms_write(
                get_request(), parent_uuid)
            if not ok:
                (code, err_msg) = status
                raise cfgm_common.exceptions.HttpError(code, err_msg)
            self._permissions.set_user_role(get_request(), obj_dict)
            obj_dict['parent_uuid'] = parent_uuid
        except NoIdError:
            err_msg = 'Parent %s type %s does not exist' % (
                pformat(parent_fq_name), parent_res_type)
            fq_name_str = ':'.join(parent_fq_name)
            self.config_object_error(None, fq_name_str, obj_type, 'http_post', err_msg)
            raise cfgm_common.exceptions.HttpError(400, err_msg)

    # Validate perms on references
    try:
        self._validate_perms_in_request(r_class, obj_type, obj_dict)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            400, 'Unknown reference in resource create %s.' %(obj_dict))

    # State modification starts from here. Ensure that cleanup is done for all state changes
    cleanup_on_failure = []
    quota_counter = []
    def stateful_create():
        # runs the DB-mutating steps; returns (ok, result) so the outer
        # frame can invoke undo() on any failure
        get_context().set_state('DBE_ALLOC')
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        (ok, result) = db_conn.dbe_alloc(obj_type, obj_dict, uuid_in_req)
        if not ok:
            return (ok, result)
        get_context().push_undo(db_conn.dbe_release, obj_type, fq_name)
        obj_id = result

        env = get_request().headers.environ
        tenant_name = env.get(hdr_server_tenant()) or 'default-project'

        get_context().set_state('PRE_DBE_CREATE')
        # type-specific hook
        (ok, result) = r_class.pre_dbe_create(
            tenant_name, obj_dict, db_conn)
        if not ok:
            return (ok, result)
        callable = getattr(r_class, 'http_post_collection_fail', None)
        if callable:
            cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        ok, quota_limit, proj_uuid = r_class.get_quota_for_resource(obj_type,
                                                                    obj_dict, db_conn)
        if not ok:
            return ok, quota_limit

        get_context().set_state('DBE_CREATE')

        if quota_limit >= 0:
            # quota applies: create under the zookeeper quota counter
            path = self._path_prefix + proj_uuid + "/" + obj_type
            if not self.quota_counter.get(path):
                # Init quota counter
                path_prefix = self._path_prefix + proj_uuid
                try:
                    QuotaHelper._zk_quota_counter_init(
                        path_prefix, {obj_type: quota_limit}, proj_uuid,
                        self._db_conn, self.quota_counter)
                except NoIdError:
                    msg = "Error in initializing quota "\
                          "Internal error : Failed to read resource count"
                    return (False, (404, msg))
            (ok, result) = QuotaHelper.verify_quota_and_create_resource(
                db_conn, obj_dict, obj_type, obj_id,
                quota_limit, self.quota_counter[path])
            if not ok:
                return (ok, result)
            else:
                # To be used for reverting back count when undo() is called
                quota_counter.append(self.quota_counter[path])
        else:
            #normal execution
            (ok, result) = db_conn.dbe_create(obj_type, obj_id, obj_dict)
            if not ok:
                return (ok, result)

        get_context().set_state('POST_DBE_CREATE')
        # type-specific hook
        try:
            ok, result = r_class.post_dbe_create(tenant_name, obj_dict, db_conn)
        except Exception as e:
            ok = False
            msg = ("%s:%s post_dbe_create had an exception: %s\n%s" %
                   (obj_type, obj_id, str(e),
                    cfgm_common.utils.detailed_traceback()))
            result = (None, msg)
        if not ok:
            # Create is done, log to system, no point in informing user
            self.config_log(result[1], level=SandeshLevel.SYS_ERR)

        return True, obj_id
    # end stateful_create

    try:
        ok, result = stateful_create()
    except Exception as e:
        ok = False
        err_msg = cfgm_common.utils.detailed_traceback()
        result = (500, err_msg)
    if not ok:
        fq_name_str = ':'.join(fq_name)
        self.undo(result, obj_type, fq_name=fq_name_str,
                  counter=quota_counter, value=-1)
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    # Initialize quota counter if resource is project
    if resource_type == 'project' and 'quota' in obj_dict:
        proj_id = obj_dict['uuid']
        quota_dict = obj_dict.get('quota')
        path_prefix = self._path_prefix + proj_id
        if quota_dict:
            try:
                QuotaHelper._zk_quota_counter_init(path_prefix, quota_dict,
                                                   proj_id, db_conn, self.quota_counter)
            except NoIdError:
                err_msg = "Error in initializing quota "\
                          "Internal error : Failed to read resource count"
                self.config_log(err_msg, level=SandeshLevel.SYS_ERR)

    rsp_body = {}
    rsp_body['name'] = name
    rsp_body['fq_name'] = fq_name
    rsp_body['uuid'] = result
    rsp_body['href'] = self.generate_url(resource_type, result)
    if parent_class:
        # non config-root child, send back parent uuid/href
        rsp_body['parent_type'] = obj_dict['parent_type']
        rsp_body['parent_uuid'] = parent_uuid
        rsp_body['parent_href'] = self.generate_url(parent_res_type,
                                                    parent_uuid)

    try:
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_create' %(obj_type), obj_dict)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In post_%s_create an extension had error for %s' \
                  %(obj_type, obj_dict)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)

    return {resource_type: rsp_body}
# end http_resource_create
@log_api_stats
def http_resource_read(self, obj_type, id):
    """Handle GET of a single resource (REST and internal callers).

    Honors the If-None-Match (ETag) header for cache revalidation
    (304 when unchanged), narrows the returned fields via the
    'fields' / 'exclude_back_refs' / 'exclude_children' /
    'exclude_hrefs' query parameters, and filters references by
    caller permissions for non-admin requests.
    Returns {resource_type: object dict}; raises HttpError otherwise.
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_read' %(obj_type), id)
    except Exception as e:
        # extension hook failures never block a read
        pass

    etag = get_request().headers.get('If-None-Match')
    db_conn = self._db_conn
    try:
        req_obj_type = db_conn.uuid_to_obj_type(id)
        if req_obj_type != obj_type:
            raise cfgm_common.exceptions.HttpError(
                404, 'No %s object found for id %s' %(resource_type, id))
        fq_name = db_conn.uuid_to_fq_name(id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))

    # common handling for all resource get
    (ok, result) = self._get_common(get_request(), id)
    if not ok:
        (code, msg) = result
        self.config_object_error(
            id, None, obj_type, 'http_get', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)

    db_conn = self._db_conn
    if etag:
        (ok, result) = db_conn.dbe_is_latest(id, etag.strip('"'))
        if not ok:
            # Not present in DB
            self.config_object_error(
                id, None, obj_type, 'http_get', result)
            raise cfgm_common.exceptions.HttpError(404, result)

        is_latest = result
        if is_latest:
            # send Not-Modified, caches use this for read optimization
            bottle.response.status = 304
            return
    # end if etag

    # Generate field list for db layer
    obj_fields = r_class.prop_fields | r_class.ref_fields
    if 'fields' in get_request().query:
        obj_fields |= set(get_request().query.fields.split(','))
    else: # default props + children + refs + backrefs
        if 'exclude_back_refs' not in get_request().query:
            obj_fields |= r_class.backref_fields
        if 'exclude_children' not in get_request().query:
            obj_fields |= r_class.children_fields

    (ok, result) = r_class.pre_dbe_read(id, fq_name, db_conn)
    if not ok:
        (code, msg) = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    try:
        (ok, result) = db_conn.dbe_read(obj_type, id,
                                        list(obj_fields), ret_readonly=True)
        if not ok:
            self.config_object_error(id, None, obj_type, 'http_get', result)
    except NoIdError as e:
        # Not present in DB
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        raise cfgm_common.exceptions.HttpError(500, result)

    # check visibility
    if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % id
        self.config_object_error(id, None, obj_type, 'http_get', result)
        raise cfgm_common.exceptions.HttpError(404, result)

    if not self.is_admin_request():
        # drop references the caller may not read
        result = self.obj_view(resource_type, result)

    (ok, err_msg) = r_class.post_dbe_read(result, db_conn)
    if not ok:
        (code, msg) = err_msg
        raise cfgm_common.exceptions.HttpError(code, msg)

    rsp_body = {}
    rsp_body['uuid'] = id
    rsp_body['name'] = result['fq_name'][-1]
    if 'exclude_hrefs' not in get_request().query:
        result = self.generate_hrefs(resource_type, result)
    rsp_body.update(result)
    id_perms = result['id_perms']
    bottle.response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_read' %(obj_type), id, rsp_body)
    except Exception as e:
        pass

    return {resource_type: rsp_body}
# end http_resource_read
# filter object references based on permissions
def obj_view(self, resource_type, obj_dict):
    """Return a copy of obj_dict whose reference lists only keep the
    linked objects the caller has read permission on.

    The perms2 of every linked object is bulk-read in one DB call,
    then each reference list is rebuilt from the permitted entries.
    """
    filtered = dict(obj_dict)
    r_class = self.get_resource_class(resource_type)
    link_fields = r_class.obj_links & set(obj_dict.keys())

    # bulk-read perms2 of every linked object in a single DB round-trip
    linked_uuids = []
    for field in link_fields:
        linked_uuids.extend(ref['uuid'] for ref in list(obj_dict[field]))
    linked_objs = self._db_conn._object_db.object_raw_read(
        r_class.object_type, linked_uuids, ["perms2"])
    by_uuid = dict((o['uuid'], o) for o in linked_objs)

    for field in link_fields:
        # rebuild the reference list keeping only readable targets
        kept = []
        for ref in obj_dict[field]:
            ref_uuid = ref['uuid']
            if ref_uuid not in by_uuid:
                continue
            ok = self._permissions.check_perms_read(
                get_request(), ref_uuid,
                obj_dict=by_uuid[ref_uuid])[0]
            if ok == True:
                kept.append(ref)
        filtered[field] = kept
    return filtered
# end obj_view
@log_api_stats
def http_resource_update(self, obj_type, id):
    """Handle PUT of a resource (REST and internal callers).

    Reads the current object, optionally redirects the update onto a
    pending (draft) version of the resource, then runs the common
    update pipeline via _put_common().  Returns {resource_type:
    {uuid, href}}, or None when the request carries no usable body.
    Raises HttpError on validation or DB failures.
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    # Early return if there is no body, an empty body, or a body that
    # does not carry the resource key.  (Using .get() here: a body
    # lacking the key previously raised KeyError and surfaced as an
    # HTTP 500 instead of being treated like an empty body.)
    request = get_request()
    req_json = request.json
    if not req_json or not req_json.get(resource_type):
        return

    obj_dict = req_json[resource_type]

    if 'perms2' in obj_dict:
        if 'owner' not in obj_dict['perms2']:
            raise cfgm_common.exceptions.HttpError(400,
                'owner in perms2 must be present')

    fields = r_class.prop_fields | r_class.ref_fields
    try:
        ok, result = self._db_conn.dbe_read(obj_type, id, fields)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        self.config_object_error(id, None, obj_type, 'http_resource_update',
                                 result[1])
        raise cfgm_common.exceptions.HttpError(result[0], result[1])
    db_obj_dict = result

    # Look if the resource have a pending version, if yes use it as resource
    # to update
    if hasattr(r_class, 'get_pending_resource'):
        ok, result = r_class.get_pending_resource(db_obj_dict, fields)
        if ok and isinstance(result, dict):
            # redirect the update onto the draft version
            db_obj_dict = result
            id = obj_dict['uuid'] = db_obj_dict['uuid']
        if not ok and result[0] != 404:
            # 404 just means "no pending version"; anything else is fatal
            self.config_object_error(
                id, None, obj_type, 'http_resource_update', result[1])
            raise cfgm_common.exceptions.HttpError(result[0], result[1])

    if resource_type == 'project' and 'quota' in db_obj_dict:
        # remember the previous quota so _put_common can reconcile counters
        old_quota_dict = db_obj_dict['quota']
    else:
        old_quota_dict = None

    self._put_common(
        'http_put', obj_type, id, db_obj_dict, req_obj_dict=obj_dict,
        quota_dict=old_quota_dict)

    rsp_body = {}
    rsp_body['uuid'] = id
    rsp_body['href'] = self.generate_url(resource_type, id)
    return {resource_type: rsp_body}
# end http_resource_update
@log_api_stats
def http_resource_delete(self, obj_type, id):
    """Handle DELETE of a resource (REST and internal callers).

    Validates existence and visibility, supports pending/async
    deletion (202), refuses deletion while non-default children or
    non-relaxed back-references remain (409), then runs the stateful
    delete: pre_dbe_delete, default-children cleanup, DB delete,
    quota-counter adjustment and post_dbe_delete.  Failures during
    the stateful phase are rolled back via undo().
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    db_conn = self._db_conn
    # if obj doesn't exist return early
    try:
        req_obj_type = db_conn.uuid_to_obj_type(id)
        if req_obj_type != obj_type:
            raise cfgm_common.exceptions.HttpError(
                404, 'No %s object found for id %s' %(resource_type, id))
        _ = db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'ID %s does not exist' %(id))

    try:
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_delete' %(obj_type), id)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        # extension errors are logged but never block the delete
        err_msg = 'In pre_%s_delete an extension had error for %s' \
                  %(obj_type, id)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)

    # read in obj from db (accepting error) to get details of it
    try:
        (read_ok, read_result) = db_conn.dbe_read(obj_type, id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not read_ok:
        self.config_object_error(
            id, None, obj_type, 'http_delete', read_result)
        # proceed down to delete the resource

    # check visibility
    if (not read_result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % id
        self.config_object_error(id, None, obj_type, 'http_delete', result)
        raise cfgm_common.exceptions.HttpError(404, result)

    # common handling for all resource delete
    parent_uuid = read_result.get('parent_uuid')
    (ok, del_result) = self._delete_common(
        get_request(), obj_type, id, parent_uuid)
    if not ok:
        (code, msg) = del_result
        self.config_object_error(id, None, obj_type, 'http_delete', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)

    # Permit abort resource deletion and return 202 status code
    get_context().set_state('PENDING_DBE_DELETE')
    ok, result = r_class.pending_dbe_delete(read_result)
    if (not ok and isinstance(result, tuple) and result[0] == 409 and
            isinstance(result[1], set)):
        # Found back reference to existing enforced or draft resource
        exist_hrefs = [self.generate_url(type, uuid)
                       for type, uuid in result[1]]
        msg = "Delete when resource still referred: %s" % exist_hrefs
        self.config_object_error(id, None, obj_type, 'http_delete', msg)
        raise cfgm_common.exceptions.HttpError(409, msg)
    elif ok and isinstance(result, tuple) and result[0] == 202:
        # Deletion accepted but not applied, pending delete
        # return 202 HTTP OK code to aware clients
        bottle.response.status = 202
        return
    elif not ok:
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    # fail if non-default children or non-derived backrefs exist
    for child_field in r_class.children_fields:
        child_type, is_derived = r_class.children_field_types[child_field]
        if is_derived:
            continue
        child_cls = self.get_resource_class(child_type)
        default_child_name = 'default-%s' %(
            child_cls(parent_type=obj_type).get_type())
        exist_hrefs = []
        for child in read_result.get(child_field, []):
            if child['to'][-1] in [default_child_name,
                                   POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
                continue
            exist_hrefs.append(
                self.generate_url(child_type, child['uuid']))
        if exist_hrefs:
            err_msg = 'Delete when children still present: %s' %(
                exist_hrefs)
            self.config_object_error(
                id, None, obj_type, 'http_delete', err_msg)
            raise cfgm_common.exceptions.HttpError(409, err_msg)

    relaxed_refs = set(db_conn.dbe_get_relaxed_refs(id))
    for backref_field in r_class.backref_fields:
        backref_type, _, is_derived = \
            r_class.backref_field_types[backref_field]
        if is_derived:
            continue
        exist_hrefs = [self.generate_url(backref_type, backref['uuid'])
                       for backref in read_result.get(backref_field, [])
                       if backref['uuid'] not in relaxed_refs]
        if exist_hrefs:
            err_msg = 'Delete when resource still referred: %s' %(
                exist_hrefs)
            self.config_object_error(
                id, None, obj_type, 'http_delete', err_msg)
            raise cfgm_common.exceptions.HttpError(409, err_msg)

    # State modification starts from here. Ensure that cleanup is done for all state changes
    cleanup_on_failure = []
    quota_counter = []
    def stateful_delete():
        # runs the DB-mutating steps; returns (ok, result) so the outer
        # frame can invoke undo() on any failure
        get_context().set_state('PRE_DBE_DELETE')

        proj_id = r_class.get_project_id_for_resource(read_result, obj_type,
                                                      db_conn)
        (ok, del_result) = r_class.pre_dbe_delete(
            id, read_result, db_conn)
        if not ok:
            return (ok, del_result)

        # Delete default children first
        for child_field in r_class.children_fields:
            child_type, is_derived = r_class.children_field_types[child_field]
            if is_derived:
                continue
            # NOTE(review): membership is tested with child_field here,
            # while create/delete_default_children test the child *type*
            # against _GENERATE_DEFAULT_INSTANCE — confirm intended.
            if child_field in self._GENERATE_DEFAULT_INSTANCE:
                self.delete_default_children(child_type, read_result)

        callable = getattr(r_class, 'http_delete_fail', None)
        if callable:
            cleanup_on_failure.append((callable, [id, read_result, db_conn]))

        get_context().set_state('DBE_DELETE')
        (ok, del_result) = db_conn.dbe_delete(obj_type, id, read_result)
        if not ok:
            return (ok, del_result)

        if proj_id:
            (ok, proj_dict) = QuotaHelper.get_project_dict_for_quota(
                proj_id, db_conn)
            if not ok:
                return ok, proj_dict
            quota_limit = QuotaHelper.get_quota_limit(proj_dict, obj_type)
            path = self._path_prefix + proj_id + "/" + obj_type
            if quota_limit > 0:
                if self.quota_counter.get(path):
                    self.quota_counter[path] -= 1
                else:
                    # quota counter obj not initialized
                    # in this api-server, Init counter
                    path_prefix = self._path_prefix + proj_id
                    QuotaHelper._zk_quota_counter_init(
                        path_prefix, {obj_type : quota_limit},
                        proj_id, db_conn, self.quota_counter)
                    if db_conn._zk_db.quota_counter_exists(path):
                        self.quota_counter[path] -= 1

                quota_counter.append(self.quota_counter.get(path))
            elif self.quota_counter.get(path):
                # quota limit is modified to unlimited
                # delete counter object
                del self.quota_counter[path]

        # type-specific hook
        get_context().set_state('POST_DBE_DELETE')
        try:
            ok, result = r_class.post_dbe_delete(id, read_result, db_conn)
        except Exception as e:
            ok = False
            msg = ("%s:%s post_dbe_delete had an exception: %s\n%s" %
                   (obj_type, id, str(e),
                    cfgm_common.utils.detailed_traceback()))
            result = (None, msg)
        if not ok:
            # Delete is done, log to system, no point in informing user
            self.config_log(result[1], level=SandeshLevel.SYS_ERR)

        return (True, '')
    # end stateful_delete

    try:
        ok, result = stateful_delete()
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(
            404, 'No %s object found for id %s' %(resource_type, id))
    except Exception as e:
        ok = False
        err_msg = cfgm_common.utils.detailed_traceback()
        result = (500, err_msg)
    if not ok:
        self.undo(result, obj_type, id=id, counter=quota_counter, value=1)
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    try:
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_delete' %(obj_type), id, read_result)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In pre_%s_delete an extension had error for %s' \
                  %(obj_type, id)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end http_resource_delete
@log_api_stats
def http_resource_list(self, obj_type):
    """Handle GET of a resource collection.

    Supported query parameters: parent_fq_name_str/parent_type or
    parent_id, back_ref_id, obj_uuids, fq_names, count, detail,
    fields, shared, filters, exclude_hrefs, page_marker, page_limit.
    Delegates the actual DB listing to _list_collection().
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    db_conn = self._db_conn
    env = get_request().headers.environ
    parent_uuids = None
    back_ref_uuids = None
    obj_uuids = None
    pagination = {}
    if 'parent_fq_name_str' in get_request().query:
        parent_uuids = []
        parent_fq_name = get_request().query.parent_fq_name_str.split(':')
        parent_types = r_class.parent_types
        if 'parent_type' in get_request().query:
            parent_types = [get_request().query.parent_type]
        for parent_type in parent_types:
            _, p_class = self._validate_resource_type(parent_type)
            try:
                parent_uuids.append(
                    self._db_conn.fq_name_to_uuid(p_class.object_type,
                                                  parent_fq_name),
                )
            except cfgm_common.exceptions.NoIdError:
                # FQ name does not exist under this parent type; skip
                pass
    elif 'parent_id' in get_request().query:
        parent_uuids = get_request().query.parent_id.split(',')
    if 'back_ref_id' in get_request().query:
        back_ref_uuids = get_request().query.back_ref_id.split(',')
    if 'obj_uuids' in get_request().query:
        obj_uuids = get_request().query.obj_uuids.split(',')
    if 'fq_names' in get_request().query:
        obj_fqn_strs = get_request().query.fq_names.split(',')
        obj_uuid = None
        for obj_fqn_str in obj_fqn_strs:
            try:
                obj_fqn = obj_fqn_str.split(':')
                obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, obj_fqn)
                if obj_uuids is None:
                    obj_uuids = []
                obj_uuids.append(obj_uuid)
            except cfgm_common.exceptions.NoIdError as e:
                # unknown FQ name: silently dropped from the result set
                pass
        if obj_uuids is None:
            # none of the requested FQ names resolved
            return {'%ss' %(resource_type): []}
    if 'page_marker' in get_request().query:
        pagination['marker'] = self._validate_page_marker(
            get_request().query['page_marker'])
    if 'page_limit' in get_request().query:
        pagination['limit'] = self._validate_page_limit(
            get_request().query['page_limit'])

    # common handling for all resource get
    for parent_uuid in list(parent_uuids or []):
        (ok, result) = self._get_common(get_request(), parent_uuid)
        if not ok:
            # caller may not read this parent; drop it from the scope
            parent_uuids.remove(parent_uuid)

    if obj_uuids is None and back_ref_uuids is None and parent_uuids == []:
        # every requested parent was filtered out
        return {'%ss' %(resource_type): []}

    if 'count' in get_request().query:
        is_count = 'true' in get_request().query.count.lower()
    else:
        is_count = False

    if 'detail' in get_request().query:
        is_detail = 'true' in get_request().query.detail.lower()
    else:
        is_detail = False

    if 'fields' in get_request().query:
        req_fields = get_request().query.fields.split(',')
    else:
        req_fields = []

    if 'shared' in get_request().query:
        include_shared = 'true' in get_request().query.shared.lower()
    else:
        include_shared = False

    try:
        filters = utils.get_filters(get_request().query.filters)
    except Exception as e:
        raise cfgm_common.exceptions.HttpError(
            400, 'Invalid filter ' + get_request().query.filters)

    if 'exclude_hrefs' in get_request().query:
        exclude_hrefs = True
    else:
        exclude_hrefs = False

    return self._list_collection(obj_type, parent_uuids, back_ref_uuids,
                                 obj_uuids, is_count, is_detail, filters,
                                 req_fields, include_shared, exclude_hrefs,
                                 pagination)
# end http_resource_list
# internal_request_<oper> - handlers of internally generated requests
# that save-ctx, generate-ctx and restore-ctx
def internal_request_create(self, resource_type, obj_json):
    """Create a resource on behalf of the api-server itself.

    Builds a synthetic admin request, installs it as the current
    context, runs the regular create handler and always restores the
    caller's context.  Returns (True, create response).
    """
    object_type = self.get_resource_class(resource_type).object_type
    try:
        saved_context = get_context()
        cur_request = get_request()
        fake_env = {
            'PATH_INFO': '/%ss' % (resource_type,),
            'bottle.app': cur_request.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, {resource_type: obj_json}, None)
        set_context(context.ApiContext(internal_req=internal_req))
        resp = self.http_resource_create(object_type)
        return True, resp
    finally:
        set_context(saved_context)
# end internal_request_create
def internal_request_update(self, resource_type, obj_uuid, obj_json):
    """Update a resource on behalf of the api-server itself.

    Runs the regular update handler under a synthetic admin request
    context and always restores the caller's context.
    Returns (True, "").
    """
    object_type = self.get_resource_class(resource_type).object_type
    try:
        saved_context = get_context()
        cur_request = get_request()
        fake_env = {
            'PATH_INFO': '/%ss' % (resource_type,),
            'bottle.app': cur_request.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, {resource_type: obj_json}, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.http_resource_update(object_type, obj_uuid)
        return True, ""
    finally:
        set_context(saved_context)
# end internal_request_update
def internal_request_delete(self, resource_type, obj_uuid):
    """Delete a resource on behalf of the api-server itself.

    Runs the regular delete handler under a synthetic admin request
    context and always restores the caller's context.
    Returns (True, "").
    """
    object_type = self.get_resource_class(resource_type).object_type
    try:
        saved_context = get_context()
        cur_request = get_request()
        fake_env = {
            'PATH_INFO': '/%s/%s' % (resource_type, obj_uuid),
            'bottle.app': cur_request.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, None, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.http_resource_delete(object_type, obj_uuid)
        return True, ""
    finally:
        set_context(saved_context)
# end internal_request_delete
def internal_request_ref_update(self, res_type, obj_uuid, operation,
                                ref_res_type, ref_uuid=None,
                                ref_fq_name=None, attr=None,
                                relax_ref_for_delete=False):
    """Add or delete a reference on behalf of the api-server itself.

    Builds the same payload the public /ref-update endpoint accepts
    and invokes ref_update_http_post() under a synthetic admin request
    context; the caller's context is always restored.
    Returns (True, "").
    """
    payload = {
        'type': res_type,
        'uuid': obj_uuid,
        'operation': operation,
        'ref-type': ref_res_type,
        'ref-uuid': ref_uuid,
        'ref-fq-name': ref_fq_name,
        'attr': attr,
        'relax_ref_for_delete': relax_ref_for_delete,
    }
    try:
        saved_context = get_context()
        cur_request = get_request()
        base_req = bottle.BaseRequest(
            {'PATH_INFO': '/ref-update',
             'bottle.app': cur_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, payload, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.ref_update_http_post()
        return True, ""
    finally:
        set_context(saved_context)
# end internal_request_ref_update
def internal_request_prop_collection(self, obj_uuid, updates=None):
    """Apply property-collection updates on behalf of the api-server.

    Invokes prop_collection_http_post() under a synthetic admin
    request context and always restores the caller's context.
    Returns (True, '').
    """
    payload = {
        'uuid': obj_uuid,
        'updates': updates or [],
    }
    try:
        saved_context = get_context()
        cur_request = get_request()
        # NOTE(review): PATH_INFO is '/ref-update' although this drives
        # the prop-collection handler; looks like a copy/paste of the
        # ref-update helper — confirm whether anything downstream
        # depends on the path value.
        base_req = bottle.BaseRequest(
            {'PATH_INFO': '/ref-update',
             'bottle.app': cur_request.environ['bottle.app'],
             'HTTP_X_USER': 'contrail-api',
             'HTTP_X_ROLE': self.cloud_admin_role})
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, payload, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.prop_collection_http_post()
        return True, ''
    finally:
        set_context(saved_context)
def alloc_vn_id(self, name):
    """Allocate and return a virtual-network id for *name* via zookeeper."""
    return self._db_conn._zk_db.alloc_vn_id(name)
def alloc_tag_value_id(self, tag_type, name):
    """Allocate and return a tag-value id for (tag_type, name) via zookeeper."""
    return self._db_conn._zk_db.alloc_tag_value_id(tag_type, name)
def create_default_children(self, object_type, parent_obj):
    """Recursively create the provisioned default child objects under
    *parent_obj*.

    Only child types listed in self._GENERATE_DEFAULT_INSTANCE are
    instantiated; derived (computed) children are skipped.  Recurses
    into each created child to build its own default children.
    Returns (True, '') on success or (False, error) on first failure.
    """
    childs = self.get_resource_class(object_type).children_field_types
    # Create a default child only if provisioned for
    child_types = {type for _, (type, derivate) in childs.items()
                   if (not derivate and
                       type in self._GENERATE_DEFAULT_INSTANCE)}
    if not child_types:
        return True, ''
    for child_type in child_types:
        child_cls = self.get_resource_class(child_type)
        child_obj_type = child_cls.object_type
        child_obj = child_cls(parent_obj=parent_obj)
        child_dict = child_obj.__dict__
        child_dict['id_perms'] = self._get_default_id_perms()
        child_dict['perms2'] = self._get_default_perms2()
        (ok, result) = self._db_conn.dbe_alloc(child_obj_type, child_dict)
        if not ok:
            return (ok, result)
        obj_id = result

        # For virtual networks, allocate an ID
        if child_obj_type == 'virtual_network':
            child_dict['virtual_network_network_id'] = self.alloc_vn_id(
                child_obj.get_fq_name_str())

        (ok, result) = self._db_conn.dbe_create(child_obj_type, obj_id,
                                                child_dict)
        if not ok:
            # DB Create failed, log and stop further child creation.
            err_msg = "DB Create failed creating %s" % child_type
            self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
            return (ok, result)

        # recurse down type hierarchy
        ok, result = self.create_default_children(child_obj_type,
                                                  child_obj)
        if not ok:
            return False, result
    return True, ''
# end create_default_children
def delete_default_children(self, resource_type, parent_dict):
    """Delete the auto-generated default children of a resource being
    removed (counterpart of create_default_children).

    Only child types listed in self._GENERATE_DEFAULT_INSTANCE are
    considered; at most one default child is deleted per child field.
    """
    r_class = self.get_resource_class(resource_type)
    for child_field in r_class.children_fields:
        # Delete a default child only if provisioned for
        child_type, is_derived = r_class.children_field_types[child_field]
        if child_type not in self._GENERATE_DEFAULT_INSTANCE:
            continue
        child_cls = self.get_resource_class(child_type)
        # first locate default child then delete it
        # NOTE(review): the default name is built from the raw
        # child_type here, whereas http_resource_delete derives it via
        # child_cls(parent_type=...).get_type() — confirm both agree.
        default_child_name = 'default-%s' %(child_type)
        child_infos = parent_dict.get(child_field, [])
        for child_info in child_infos:
            if child_info['to'][-1] == default_child_name:
                default_child_id = child_info['uuid']
                self.http_resource_delete(child_type, default_child_id)
                break
# end delete_default_children
@classmethod
def _generate_resource_crud_methods(cls, obj):
    """Attach per-resource-type CRUD handler methods to *obj*.

    For every known object type, binds the generic http_resource_*
    handlers to that type via functools.partial and installs them
    under the conventional names consumed by the route table
    (e.g. virtual_networks_http_post, virtual_network_http_get, ...).
    """
    handler_specs = [
        (obj.http_resource_create, '%ss_http_post'),
        (obj.http_resource_read, '%s_http_get'),
        (obj.http_resource_update, '%s_http_put'),
        (obj.http_resource_delete, '%s_http_delete'),
        (obj.http_resource_list, '%ss_http_get'),
    ]
    for object_type, _ in all_resource_type_tuples:
        for handler, name_fmt in handler_specs:
            bound = functools.partial(handler, object_type)
            # keep the generic handler's metadata on the bound method
            functools.update_wrapper(bound, handler)
            setattr(obj, name_fmt % (object_type,), bound)
# end _generate_resource_crud_methods
@classmethod
def _generate_resource_crud_uri(cls, obj):
    """Register the REST routes for every resource type on *obj*.

    For each type this wires up:
      GET/PUT/DELETE /<resource-type>/<id>   (leaf resource)
      POST/GET       /<resource-type>s       (collection)
    pointing at the handler methods installed by
    _generate_resource_crud_methods.
    """
    for object_type, resource_type in all_resource_type_tuples:
        # leaf resource
        leaf_path = '/%s/<id>' % (resource_type,)
        obj.route(leaf_path, 'GET',
                  getattr(obj, '%s_http_get' % (object_type,)))
        obj.route(leaf_path, 'PUT',
                  getattr(obj, '%s_http_put' % (object_type,)))
        obj.route(leaf_path, 'DELETE',
                  getattr(obj, '%s_http_delete' % (object_type,)))
        # collection of leaf
        collection_path = '/%ss' % (resource_type,)
        obj.route(collection_path, 'POST',
                  getattr(obj, '%ss_http_post' % (object_type,)))
        obj.route(collection_path, 'GET',
                  getattr(obj, '%ss_http_get' % (object_type,)))
# end _generate_resource_crud_uri
def __init__(self, args_str=None):
    """Build and wire up the VNC API server.

    Parses CLI/config arguments, registers all HTTP routes and homepage
    link objects, initialises sandesh logging/tracing, connects to the
    config database, seeds default DB entries and per-project quota
    counters in ZooKeeper, sets up RBAC/permission helpers, loads
    extensions and installs the auth (keystone or no-auth) WSGI
    middleware.

    :param args_str: optional argument string; defaults to sys.argv[1:].
    """
    self._db_conn = None
    self._resource_classes = {}
    self._args = None
    self._path_prefix = _DEFAULT_ZK_COUNTER_PATH_PREFIX
    self.quota_counter = {}
    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    self._parse_args(args_str)
    self.lock_path_prefix = '%s/%s' % (self._args.cluster_id,
                                       _DEFAULT_ZK_LOCK_PATH_PREFIX)
    self.security_lock_prefix = '%s/security' % self.lock_path_prefix

    # set the max size of the api requests
    bottle.BaseRequest.MEMFILE_MAX = self._args.max_request_size

    # multi_tenancy is ignored if aaa_mode is configured by user
    if self._args.aaa_mode is not None:
        # NOTE(review): self.aaa_mode is read before it is first assigned
        # here -- presumably a property backed by self._args; confirm.
        if self.aaa_mode not in AAA_MODE_VALID_VALUES:
            self.aaa_mode = AAA_MODE_DEFAULT_VALUE
    elif self._args.multi_tenancy is not None:
        # MT configured by user - determine from aaa-mode
        self.aaa_mode = "cloud-admin" if self._args.multi_tenancy else "no-auth"
    else:
        self.aaa_mode = "cloud-admin"

    # set python logging level from logging_level cmdline arg
    if not self._args.logging_conf:
        logging.basicConfig(level = getattr(logging, self._args.logging_level))

    self._base_url = "http://%s:%s" % (self._args.listen_ip_addr,
                                       self._args.listen_port)

    # Generate LinkObjects for all entities
    links = []
    # Link for root
    links.append(LinkObject('root', self._base_url , '/config-root',
                            'config-root'))

    for _, resource_type in all_resource_type_tuples:
        link = LinkObject('collection',
                          self._base_url , '/%ss' %(resource_type),
                          '%s' %(resource_type))
        links.append(link)

    for _, resource_type in all_resource_type_tuples:
        link = LinkObject('resource-base',
                          self._base_url , '/%s' %(resource_type),
                          '%s' %(resource_type))
        links.append(link)

    self._homepage_links = links

    self._pipe_start_app = None

    #GreenletProfiler.set_clock_type('wall')
    self._profile_info = None

    # Action (RPC-style) URIs declared in _ACTION_RESOURCES
    for act_res in _ACTION_RESOURCES:
        link = LinkObject('action', self._base_url, act_res['uri'],
                          act_res['link_name'], act_res['method'])
        self._homepage_links.append(link)

    # Register for VN delete request. Disallow delete of system default VN
    self.route('/virtual-network/<id>', 'DELETE', self.virtual_network_http_delete)

    self.route('/documentation/<filename:path>',
               'GET', self.documentation_http_get)
    self._homepage_links.insert(
        0, LinkObject('documentation', self._base_url,
                      '/documentation/index.html',
                      'documentation', 'GET'))

    # APIs to reserve/free block of IP address from a VN/Subnet
    self.route('/virtual-network/<id>/ip-alloc',
               'POST', self.vn_ip_alloc_http_post)
    self._homepage_links.append(
        LinkObject('action', self._base_url,
                   '/virtual-network/%s/ip-alloc',
                   'virtual-network-ip-alloc', 'POST'))

    self.route('/virtual-network/<id>/ip-free',
               'POST', self.vn_ip_free_http_post)
    self._homepage_links.append(
        LinkObject('action', self._base_url,
                   '/virtual-network/%s/ip-free',
                   'virtual-network-ip-free', 'POST'))

    # APIs to find out number of ip instances from given VN subnet
    self.route('/virtual-network/<id>/subnet-ip-count',
               'POST', self.vn_subnet_ip_count_http_post)
    self._homepage_links.append(
        LinkObject('action', self._base_url,
                   '/virtual-network/%s/subnet-ip-count',
                   'virtual-network-subnet-ip-count', 'POST'))

    # Enable/Disable aaa mode
    self.route('/aaa-mode', 'GET', self.aaa_mode_http_get)
    self.route('/aaa-mode', 'PUT', self.aaa_mode_http_put)

    # Set Tag actions
    self.route('/set-tag', 'POST', self.set_tag)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/set-tag', 'set-tag',
                   'POST'))

    # Commit or discard draft security policy
    self.route('/security-policy-draft', 'POST',
               self.security_policy_draft)
    self._homepage_links.append(
        LinkObject('action', self._base_url, '/security-policy-draft',
                   'security-policy-draft', 'POST'))

    # randomize the collector list
    self._random_collectors = self._args.collectors
    self._chksum = "";
    if self._args.collectors:
        self._chksum = hashlib.md5(''.join(self._args.collectors)).hexdigest()
        self._random_collectors = random.sample(self._args.collectors, \
            len(self._args.collectors))

    # sandesh init
    self._sandesh = Sandesh()
    # Reset the sandesh send rate limit value
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    module = Module.API_SERVER
    module_name = ModuleNames[Module.API_SERVER]
    node_type = Module2NodeType[module]
    node_type_name = NodeTypeNames[node_type]
    self.table = "ObjectConfigNode"
    if self._args.worker_id:
        instance_id = self._args.worker_id
    else:
        instance_id = INSTANCE_ID_DEFAULT
    hostname = socket.gethostname()
    self._sandesh.init_generator(module_name, hostname,
                                 node_type_name, instance_id,
                                 self._random_collectors,
                                 'vnc_api_server_context',
                                 int(self._args.http_server_port),
                                 ['cfgm_common', 'vnc_cfg_api_server.sandesh'],
                                 logger_class=self._args.logger_class,
                                 logger_config_file=self._args.logging_conf,
                                 config=self._args.sandesh_config)
    self._sandesh.trace_buffer_create(name="VncCfgTraceBuf", size=1000)
    self._sandesh.trace_buffer_create(name="RestApiTraceBuf", size=1000)
    self._sandesh.trace_buffer_create(name="DBRequestTraceBuf", size=1000)
    self._sandesh.trace_buffer_create(name="DBUVERequestTraceBuf", size=1000)
    self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                      size=1000)
    VncGreenlet.register_sandesh_handler()

    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    ConnectionState.init(self._sandesh, hostname, module_name,
                         instance_id,
                         staticmethod(ConnectionState.get_conn_state_cb),
                         NodeStatusUVE, NodeStatus, self.table)

    # Address Management interface
    addr_mgmt = vnc_addr_mgmt.AddrMgmt(self)
    self._addr_mgmt = addr_mgmt

    # DB interface initialization
    if self._args.wipe_config:
        self._db_connect(True)
    else:
        self._db_connect(self._args.reset_config)
        self._db_init_entries()

    # ZK quota counter initialization
    (ok, project_list, _) = self._db_conn.dbe_list('project',
                                                   field_names=['quota'])
    if not ok:
        (code, err_msg) = project_list # status
        raise cfgm_common.exceptions.HttpError(code, err_msg)
    for project in project_list or []:
        if project.get('quota'):
            path_prefix = self._path_prefix + project['uuid']
            try:
                QuotaHelper._zk_quota_counter_init(
                    path_prefix, project['quota'], project['uuid'],
                    self._db_conn, self.quota_counter)
            except NoIdError:
                err_msg = "Error in initializing quota "\
                    "Internal error : Failed to read resource count"
                self.config_log(err_msg, level=SandeshLevel.SYS_ERR)

    # API/Permissions check
    # after db init (uses db_conn)
    self._rbac = vnc_rbac.VncRbac(self, self._db_conn)
    self._permissions = vnc_perms.VncPermissions(self, self._args)
    if self.is_rbac_enabled():
        self._create_default_rbac_rule()
    if self.is_auth_needed():
        self._generate_obj_view_links()

    # publish build info as a cpu-state UVE when packaged install detected
    if os.path.exists('/usr/bin/contrail-version'):
        cfgm_cpu_uve = ModuleCpuState()
        cfgm_cpu_uve.name = socket.gethostname()
        cfgm_cpu_uve.config_node_ip = self.get_server_ip()

        command = "contrail-version contrail-config | grep 'contrail-config'"
        version = os.popen(command).read()
        # NOTE(review): assumes the version line has exactly 3 fields
        _, rpm_version, build_num = version.split()

        cfgm_cpu_uve.build_info = build_info + '"build-id" : "' + \
            rpm_version + '", "build-number" : "' + \
            build_num + '"}]}'

        cpu_info_trace = ModuleCpuStateTrace(data=cfgm_cpu_uve, sandesh=self._sandesh)
        cpu_info_trace.send(sandesh=self._sandesh)

    # RFC-4122 version-4 UUID matcher (dashes optional)
    self.re_uuid = re.compile('^[0-9A-F]{8}-?[0-9A-F]{4}-?4[0-9A-F]{3}-?[89AB][0-9A-F]{3}-?[0-9A-F]{12}$',
                              re.IGNORECASE)

    # Load extensions
    self._extension_mgrs = {}
    self._load_extensions()

    # Authn/z interface
    if self._args.auth == 'keystone':
        auth_svc = vnc_auth_keystone.AuthServiceKeystone(self, self._args)
    else:
        auth_svc = vnc_auth.AuthService(self, self._args)
    self._pipe_start_app = auth_svc.get_middleware_app()
    self._auth_svc = auth_svc

    # only the first worker resyncs domains/projects from keystone
    if int(self._args.worker_id) == 0:
        try:
            self._extension_mgrs['resync'].map(
                self._resync_domains_projects)
        except RuntimeError:
            # lack of registered extension leads to RuntimeError
            pass
        except Exception as e:
            err_msg = cfgm_common.utils.detailed_traceback()
            self.config_log(err_msg, level=SandeshLevel.SYS_ERR)

    # following allowed without authentication
    self.white_list = [
        '^/documentation',  # allow all documentation
        '^/$', # allow discovery
    ]

    self._global_asn = None

    # map of running job instances. Key is the pid and value is job
    # instance info
    self._job_mgr_running_instances = {}
# end __init__
@property
def global_autonomous_system(self):
    """Global AS number, lazily read from the GlobalSystemConfig object.

    Fetched once via the resource class ``locate`` and cached on the
    instance; the setter in this property pair lets notification code
    refresh the cache.  Raises VncError when the config cannot be read.
    """
    if not self._global_asn:
        gsc_class = self.get_resource_class(GlobalSystemConfig.object_type)
        ok, result = gsc_class.locate(uuid=self._gsc_uuid, create_it=False,
                                      fields=['autonomous_system'])
        if not ok:
            msg = ("Cannot fetch Global System Config to obtain "
                   "autonomous system")
            raise cfgm_common.exceptions.VncError(msg)
        self._global_asn = result['autonomous_system']
    return self._global_asn

@global_autonomous_system.setter
def global_autonomous_system(self, asn):
    # cache-refresh hook for when the AS number changes elsewhere
    self._global_asn = asn
def _extensions_transform_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_request', request)
# end _extensions_transform_request
def _extensions_validate_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'validate_request', request)
# end _extensions_validate_request
def _extensions_transform_response(self, request, response):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_response', request, response)
# end _extensions_transform_response
@ignore_exceptions
def _generate_rest_api_request_trace(self):
    """Build a RestApiTrace message for the current mutating request.

    Returns None for GETs.  Tags the current greenlet with the
    X-Request-Id header (or a freshly generated id) so later logs can be
    correlated, and -- when the auth middleware recorded a keystone
    validation time for this request -- emits a VncApiLatencyStats UVE
    before returning the trace object.
    """
    method = get_request().method.upper()
    if method == 'GET':
        return None

    req_id = get_request().headers.get('X-Request-Id',
                                       'req-%s' %(str(uuid.uuid4())))
    gevent.getcurrent().trace_request_id = req_id
    url = get_request().url
    if method == 'DELETE':
        req_data = ''
    else:
        try:
            req_data = json.dumps(get_request().json)
        except Exception as e:
            req_data = '%s: Invalid request body' %(e)
    rest_trace = RestApiTrace(request_id=req_id)
    rest_trace.url = url
    rest_trace.method = method
    rest_trace.request_data = req_data

    # Also log keystone response time against this request id,
    # before returning the trace message.
    if ((get_context().get_keystone_response_time()) is not None):
        response_time = get_context().get_keystone_response_time()
        # NOTE(review): days are multiplied to *seconds* here, not
        # microseconds -- presumably latencies never span a day; confirm.
        response_time_in_usec = ((response_time.days*24*60*60) +
                                 (response_time.seconds*1000000) +
                                 response_time.microseconds)
        stats = VncApiLatencyStats(
            operation_type='VALIDATE',
            application='KEYSTONE',
            response_time_in_usec=response_time_in_usec,
            response_size=0,
            identifier=req_id,
        )
        # NOTE(review): node_name looks hard-coded to "issu-vm6" -- confirm
        # whether this should be the local hostname.
        stats_log = VncApiLatencyStatsLog(node_name="issu-vm6", api_latency_stats=stats, sandesh=self._sandesh)
        x=stats_log.send(sandesh=self._sandesh)
    return rest_trace
# end _generate_rest_api_request_trace
@ignore_exceptions
def _generate_rest_api_response_trace(self, rest_trace, response):
    """Complete and emit a REST API trace started for the request.

    No-op when *rest_trace* is None (e.g. GET requests).  Records the
    bottle response status and the JSON-serialised response body, then
    sends the trace on the RestApiTraceBuf buffer.
    """
    if not rest_trace:
        return

    rest_trace.status = bottle.response.status
    rest_trace.response_body = json.dumps(response)
    rest_trace.trace_msg(name='RestApiTraceBuf', sandesh=self._sandesh)
# end _generate_rest_api_response_trace
# Public Methods
def route(self, uri, method, handler):
    """Register *handler* for (*uri*, *method*) on the bottle app, wrapped
    so every request runs extension hooks, RBAC validation and tracing.

    The wrapper: transforms/validates the request via the 'resourceApi'
    extensions, opens a REST trace, enforces RBAC, invokes the handler,
    then traces and transforms the response.  An HttpError from any
    stage is mapped to bottle.abort with its status/content; any other
    exception is logged with a cgitb-formatted traceback and re-raised.
    """
    @use_context
    def handler_trap_exception(*args, **kwargs):
        try:
            trace = None
            self._extensions_transform_request(get_request())
            self._extensions_validate_request(get_request())

            trace = self._generate_rest_api_request_trace()

            (ok, status) = self._rbac.validate_request(get_request())
            if not ok:
                (code, err_msg) = status
                raise cfgm_common.exceptions.HttpError(code, err_msg)
            response = handler(*args, **kwargs)
            self._generate_rest_api_response_trace(trace, response)

            self._extensions_transform_response(get_request(), response)

            return response
        except Exception as e:
            # flush the partial trace so the failing request is visible
            if trace:
                trace.trace_msg(name='RestApiTraceBuf',
                                sandesh=self._sandesh)
            # don't log details of cfgm_common.exceptions.HttpError i.e handled error cases
            if isinstance(e, cfgm_common.exceptions.HttpError):
                bottle.abort(e.status_code, e.content)
            else:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
                raise

    self.api_bottle.route(uri, method, handler_trap_exception)
# end route
def get_args(self):
    """Return the parsed command-line/config argument namespace."""
    return self._args
# end get_args
def get_server_ip(self):
    """Collect the non-loopback IPv4 addresses of all local interfaces.

    Interfaces that netifaces cannot query are skipped (logged at
    debug level).  Returns a de-duplicated list in interface order.
    """
    addresses = []
    for iface in netifaces.interfaces():
        try:
            if netifaces.AF_INET in netifaces.ifaddresses(iface):
                addr = netifaces.ifaddresses(iface)[netifaces.AF_INET][0][
                    'addr']
                if addr != '127.0.0.1' and addr not in addresses:
                    addresses.append(addr)
        except ValueError as e:
            self.config_log("Skipping interface %s: %s" % (iface, str(e)),
                            level=SandeshLevel.SYS_DEBUG)
    return addresses
# end get_server_ip
def get_listen_ip(self):
    """Return the IP address the API server listens on."""
    return self._args.listen_ip_addr
# end get_listen_ip
def get_server_port(self):
    """Return the port the API server listens on."""
    return self._args.listen_port
# end get_server_port
def get_worker_id(self):
    """Return this worker's id coerced to an integer."""
    return int(self._args.worker_id)
# end get_worker_id
def get_pipe_start_app(self):
    """Return the WSGI middleware pipeline entry (auth wrapper) app."""
    return self._pipe_start_app
# end get_pipe_start_app
def get_rabbit_health_check_interval(self):
    """Return the rabbitmq health-check interval as a float (seconds)."""
    interval = self._args.rabbit_health_check_interval
    return float(interval)
# end get_rabbit_health_check_interval
def is_auth_disabled(self):
    """True unless the configured auth backend is (case-insensitively)
    'keystone'."""
    auth = self._args.auth
    return auth is None or auth.lower() != 'keystone'
def is_admin_request(self):
    """True when the current request may act with admin privileges.

    Admin when authentication is not needed at all, when the request
    originated internally (server-to-server), or when the caller carries
    the configured cloud-admin role in its X-Api-Role / X-Role headers.
    """
    if not self.is_auth_needed():
        return True

    if is_internal_request():
        return True

    env = bottle.request.headers.environ
    roles = []
    # roles may appear in either header, comma separated
    for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'):
        if field in env:
            roles.extend(env[field].split(','))
    return has_role(self.cloud_admin_role, roles)
def get_auth_headers_from_token(self, request, token):
    """Resolve *token* into auth headers via the auth service.

    Returns an empty dict when authentication is disabled or not needed.
    """
    if self.is_auth_disabled() or not self.is_auth_needed():
        return {}
    return self._auth_svc.get_auth_headers_from_token(request, token)
# end get_auth_headers_from_token
def _generate_obj_view_links(self):
    """Precompute, per resource class, the set of link fields
    (refs + backrefs + children) used when building object views."""
    for _, resource_type in all_resource_type_tuples:
        r_class = self.get_resource_class(resource_type)
        r_class.obj_links = (r_class.ref_fields
                             | r_class.backref_fields
                             | r_class.children_fields)
# Check for the system created VN. Disallow such VN delete
def virtual_network_http_delete(self, id):
    """DELETE handler for virtual-network that protects system VNs.

    Rejects with 409 any attempt to delete the ip-fabric or link-local
    virtual networks created by the system; 404 when *id* is unknown or
    not a virtual-network.  Otherwise defers to the generated base
    handler.
    """
    db_conn = self._db_conn
    # if obj doesn't exist return early
    try:
        obj_type = db_conn.uuid_to_obj_type(id)
        if obj_type != 'virtual_network':
            raise cfgm_common.exceptions.HttpError(
                404, 'No virtual-network object found for id %s' %(id))
        vn_name = db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'ID %s does not exist' %(id))

    if (vn_name == cfgm_common.IP_FABRIC_VN_FQ_NAME or
            vn_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME):
        raise cfgm_common.exceptions.HttpError(
            409,
            'Can not delete system created default virtual-network '+id)
    super(VncApiServer, self).virtual_network_http_delete(id)
# end
@use_context
def homepage_http_get(self):
    """Return the discovery document: base href plus all homepage links.

    The request URL (minus trailing '/') is used as the base href;
    <script> tags in the echoed URL are neutralised into HTML comments.
    """
    json_body = {}
    json_links = []
    # strip trailing '/' in url
    url = get_request().url[:-1]
    url = url.replace('<script>', '<!--script>')
    url = url.replace('</script>', '</script-->')
    for link in self._homepage_links:
        # strip trailing '/' in url
        json_links.append(
            {'link': link.to_dict(with_url=url)}
        )

    json_body = {"href": url, "links": json_links}

    return json_body
# end homepage_http_get
def documentation_http_get(self, filename):
    """Serve a static documentation file from the packaged doc root.

    Falls back from the Ubuntu install path to the CentOS one when the
    former does not exist on disk.
    """
    # ubuntu packaged path
    doc_root = '/usr/share/doc/contrail-config/doc/contrail-config/html/'
    if not os.path.exists(doc_root):
        # centos packaged path
        # NOTE(review): 'contrial-config' looks misspelled -- confirm the
        # actual on-disk path of the centos package before changing it.
        doc_root='/usr/share/doc/python-vnc_cfg_api_server/contrial-config/html/'

    return bottle.static_file(
        filename,
        root=doc_root)
# end documentation_http_get
def obj_perms_http_get(self):
    """Return token info and effective permissions for the calling user.

    When authentication is disabled the response carries a null token
    and no admin roles.  Otherwise the user token is validated against
    the auth service; if a ``uuid`` query parameter is supplied, the
    caller's permissions on that object are included.  For Keystone v3
    tokens the project domain id is normalised to a dash-less UUID.
    Raises HttpError when token validation fails.
    """
    if self.is_auth_disabled() or not self.is_auth_needed():
        result = {
            'token_info': None,
            'is_cloud_admin_role': False,
            'is_global_read_only_role': False,
            'permissions': 'RWX'
        }
        return result

    obj_uuid = None
    if 'uuid' in get_request().query:
        obj_uuid = get_request().query.uuid

    ok, result = self._auth_svc.validate_user_token()
    if not ok:
        code, msg = result
        self.config_object_error(obj_uuid, None, None,
                                 'obj_perms_http_get', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)
    token_info = result

    # roles in result['token_info']['access']['user']['roles']
    result = {'token_info': token_info}
    # Handle v2 and v3 responses
    roles_list = []
    if 'access' in token_info:
        roles_list = [roles['name'] for roles in
                      token_info['access']['user']['roles']]
    elif 'token' in token_info:
        roles_list = [roles['name'] for roles in
                      token_info['token']['roles']]
    result['is_cloud_admin_role'] = has_role(self.cloud_admin_role,
                                             roles_list)
    result['is_global_read_only_role'] = has_role(
        self.global_read_only_role, roles_list)
    if obj_uuid:
        result['permissions'] = self._permissions.obj_perms(get_request(),
                                                            obj_uuid)
    if 'token' in token_info.keys():
        if 'project' in token_info['token'].keys():
            domain = None
            try:
                domain = token_info['token']['project']['domain']['id']
                domain = str(uuid.UUID(domain))
            except (ValueError, TypeError):
                # Fix: the original ``except ValueError, TypeError:`` was
                # py2 syntax catching only ValueError (and binding it to
                # the name TypeError); a tuple catches both as intended.
                if domain == 'default':
                    domain = 'default-domain'
                domain = self._db_conn.fq_name_to_uuid('domain', [domain])
            if domain:
                domain = domain.replace('-', '')
                token_info['token']['project']['domain']['id'] = domain

    return result
# end obj_perms_http_get
def invalid_uuid(self, uuid):
    """True when *uuid* does not match the server's version-4 UUID regex."""
    return not self.re_uuid.match(uuid)
def invalid_access(self, access):
    """True unless *access* is a plain int within the 3-bit perms
    range 0..7 (bool and other int subclasses are rejected)."""
    return not (type(access) is int and 0 <= access <= 7)
def invalid_share_type(self, share_type):
    """True when *share_type* is not a valid PERMS2 share kind
    (per cfgm_common.PERMS2_VALID_SHARE_TYPES)."""
    return share_type not in cfgm_common.PERMS2_VALID_SHARE_TYPES
# change ownership of an object
def obj_chown_http_post(self):
    """POST handler transferring ownership (perms2.owner) of an object.

    Body: {'uuid': <object uuid>, 'owner': <new owner uuid>}.  Requires
    RW permission on the object.  Raises HttpError 400 on missing or
    malformed uuids, 404 when the object is unknown, 403 when the
    caller lacks write access.
    """
    obj_uuid = get_request().json.get('uuid')
    owner = get_request().json.get('owner')
    if obj_uuid is None:
        msg = "Bad Request, no resource UUID provided to chown"
        raise cfgm_common.exceptions.HttpError(400, msg)
    if owner is None:
        msg = "Bad Request, no owner UUID provided to chown"
        raise cfgm_common.exceptions.HttpError(400, msg)
    if self.invalid_uuid(obj_uuid):
        msg = "Bad Request, invalid resource UUID"
        raise cfgm_common.exceptions.HttpError(400, msg)
    if self.invalid_uuid(owner):
        msg = "Bad Request, invalid owner UUID"
        raise cfgm_common.exceptions.HttpError(400, msg)

    try:
        obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
    except NoIdError as e:
        # Not present in DB
        raise cfgm_common.exceptions.HttpError(404, str(e))

    self._ensure_services_conn('chown', obj_type, obj_uuid=obj_uuid)

    # ensure user has RW permissions to object
    perms = self._permissions.obj_perms(get_request(), obj_uuid)
    if not 'RW' in perms:
        raise cfgm_common.exceptions.HttpError(403, " Permission denied")

    try:
        (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                                obj_fields=['perms2'])
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))

    # NOTE(review): unlike obj_chmod_http_post, the owner uuid keeps its
    # dashes here -- confirm downstream consumers normalise it.
    obj_dict['perms2']['owner'] = owner
    self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)

    msg = "chown: %s owner set to %s" % (obj_uuid, owner)
    self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

    return {}
#end obj_chown_http_post
def dump_cache(self):
    """Diagnostics endpoint: dump entries from the object-read cache.

    Request body may carry {'uuids': [...], 'count': N}; defaults to
    10 entries.  Reaches into the DB client's private cache manager,
    so this is intended for debugging only.
    """
    self._post_common(None, {})
    req_dict = get_request().json or {}
    obj_uuids = req_dict.get('uuids', [])
    count = req_dict.get('count', 10)

    return self._db_conn._object_db._obj_cache_mgr.dump_cache(
        obj_uuids=obj_uuids, count=count)
# chmod for an object
def obj_chmod_http_post(self):
    """POST handler updating an object's perms2 block.

    Body may carry any of: owner, owner_access, share (list of
    {'tenant', 'tenant_access'}), global_access.  Requires RW
    permission on the object.  A non-zero global_access also flags the
    object as shared.  Old and new permissions are logged at NOTICE.
    Raises HttpError 400/403/404 on validation, access or lookup errors.
    """
    try:
        obj_uuid = get_request().json['uuid']
    except Exception as e:
        raise cfgm_common.exceptions.HttpError(400, str(e))
    if self.invalid_uuid(obj_uuid):
        raise cfgm_common.exceptions.HttpError(
            400, "Bad Request, invalid object id")

    try:
        obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
    except NoIdError as e:
        # Not present in DB
        raise cfgm_common.exceptions.HttpError(404, str(e))

    self._ensure_services_conn('chmod', obj_type, obj_uuid=obj_uuid)

    # ensure user has RW permissions to object
    perms = self._permissions.obj_perms(get_request(), obj_uuid)
    if not 'RW' in perms:
        raise cfgm_common.exceptions.HttpError(403, " Permission denied")

    request_params = get_request().json
    owner = request_params.get('owner')
    share = request_params.get('share')
    owner_access = request_params.get('owner_access')
    global_access = request_params.get('global_access')

    (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                            obj_fields=['perms2', 'is_shared'])
    obj_perms = obj_dict['perms2']
    old_perms = '%s/%d %d %s' % (obj_perms['owner'],
        obj_perms['owner_access'], obj_perms['global_access'],
        ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])

    if owner:
        if self.invalid_uuid(owner):
            raise cfgm_common.exceptions.HttpError(
                400, "Bad Request, invalid owner")
        # owner is stored dash-less
        obj_perms['owner'] = owner.replace('-','')
    if owner_access is not None:
        if self.invalid_access(owner_access):
            raise cfgm_common.exceptions.HttpError(
                400, "Bad Request, invalid owner_access value")
        obj_perms['owner_access'] = owner_access
    if share is not None:
        try:
            for item in share:
                """
                item['tenant'] := [<share_type>:] <uuid>
                share_type := ['domain' | 'tenant']
                """
                (share_type, share_id) = cfgm_common.utils.shareinfo_from_perms2_tenant(item['tenant'])
                if self.invalid_share_type(share_type) or self.invalid_uuid(share_id) or self.invalid_access(item['tenant_access']):
                    raise cfgm_common.exceptions.HttpError(
                        400, "Bad Request, invalid share list")
        except Exception as e:
            raise cfgm_common.exceptions.HttpError(400, str(e))
        obj_perms['share'] = share
    if global_access is not None:
        if self.invalid_access(global_access):
            raise cfgm_common.exceptions.HttpError(
                400, "Bad Request, invalid global_access value")
        obj_perms['global_access'] = global_access
        obj_dict['is_shared'] = (global_access != 0)

    new_perms = '%s/%d %d %s' % (obj_perms['owner'],
        obj_perms['owner_access'], obj_perms['global_access'],
        ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])

    self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
    msg = "chmod: %s perms old=%s, new=%s" % (obj_uuid, old_perms, new_perms)
    self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

    return {}
# end obj_chmod_http_post
def prop_collection_http_get(self):
    """GET handler returning ListProperty/MapProperty collection fields.

    Query params: uuid (required), fields (required, comma separated --
    each must be a list- or map-property of the object's type) and an
    optional position.  Runs the common read checks (permissions,
    visibility) before fetching just the requested collections.
    Raises HttpError 400/404/500 on validation, lookup or DB failures.
    """
    if 'uuid' not in get_request().query:
        raise cfgm_common.exceptions.HttpError(
            400, 'Object uuid needed for property collection get')
    obj_uuid = get_request().query.uuid

    if 'fields' not in get_request().query:
        raise cfgm_common.exceptions.HttpError(
            400, 'Object fields needed for property collection get')
    obj_fields = get_request().query.fields.split(',')

    if 'position' in get_request().query:
        fields_position = get_request().query.position
    else:
        fields_position = None

    try:
        obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'Object Not Found: ' + obj_uuid)
    resource_class = self.get_resource_class(obj_type)

    for obj_field in obj_fields:
        if ((obj_field not in resource_class.prop_list_fields) and
                (obj_field not in resource_class.prop_map_fields)):
            err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
                obj_field)
            raise cfgm_common.exceptions.HttpError(400, err_msg)
    # request validations over

    # common handling for all resource get
    (ok, result) = self._get_common(get_request(), obj_uuid)
    if not ok:
        (code, msg) = result
        self.config_object_error(
            obj_uuid, None, None, 'prop_collection_http_get', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)

    try:
        ok, result = self._db_conn.prop_collection_get(
            obj_type, obj_uuid, obj_fields, fields_position)
        if not ok:
            self.config_object_error(
                obj_uuid, None, None, 'prop_collection_http_get', result)
    except NoIdError as e:
        # Not present in DB
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        raise cfgm_common.exceptions.HttpError(500, result)

    # check visibility
    if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
        # Fix: the original interpolated and reported the *builtin*
        # ``id`` function here instead of the object's uuid.
        result = 'This object is not visible by users: %s' % obj_uuid
        self.config_object_error(
            obj_uuid, None, None, 'prop_collection_http_get', result)
        raise cfgm_common.exceptions.HttpError(404, result)

    # Prepare response
    del result['id_perms']

    return result
# end prop_collection_http_get
def prop_collection_http_post(self):
    """POST handler applying add/modify/delete (list) or set/delete (map)
    operations to a resource's collection properties.

    Body: {'uuid': ..., 'updates': [{'field', 'operation', 'value',
    'position'}, ...]}.  Each update is validated against the resource
    schema, then the common put path applies the whole batch.  Raises
    HttpError 400 on malformed requests and 404 for unknown objects.
    """
    request_params = get_request().json
    # validate each requested operation
    obj_uuid = request_params.get('uuid')
    if not obj_uuid:
        err_msg = 'Error: prop_collection_update needs obj_uuid'
        raise cfgm_common.exceptions.HttpError(400, err_msg)

    try:
        obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'Object Not Found: ' + obj_uuid)
    r_class = self.get_resource_class(obj_type)

    for req_param in request_params.get('updates') or []:
        obj_field = req_param.get('field')
        if obj_field in r_class.prop_list_fields:
            prop_coll_type = 'list'
        elif obj_field in r_class.prop_map_fields:
            prop_coll_type = 'map'
        else:
            err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
                obj_field)
            raise cfgm_common.exceptions.HttpError(400, err_msg)

        req_oper = req_param.get('operation').lower()
        field_val = req_param.get('value')
        # NOTE(review): str() turns a missing position into the string
        # 'None', so the "position is None" checks below can never fire
        # -- preserved as-is to keep request validation behavior.
        field_pos = str(req_param.get('position'))
        prop_type = r_class.prop_field_types[obj_field]['xsd_type']
        prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
        prop_val_type = prop_cls.attr_field_type_vals[prop_cls.attr_fields[0]]['attr_type']
        prop_val_cls = cfgm_common.utils.str_to_class(prop_val_type, __name__)
        try:
            self._validate_complex_type(prop_val_cls, field_val)
        except Exception as e:
            raise cfgm_common.exceptions.HttpError(400, str(e))
        if prop_coll_type == 'list':
            if req_oper not in ('add', 'modify', 'delete'):
                err_msg = 'Unsupported operation %s in request %s' %(
                    req_oper, json.dumps(req_param))
                raise cfgm_common.exceptions.HttpError(400, err_msg)
            # Fix: the three messages below each passed two values to a
            # single %s placeholder, raising TypeError instead of the
            # intended HTTP 400.
            if ((req_oper == 'add') and field_val is None):
                err_msg = 'Add needs field value in request %s' %(
                    json.dumps(req_param))
                raise cfgm_common.exceptions.HttpError(400, err_msg)
            elif ((req_oper == 'modify') and
                    None in (field_val, field_pos)):
                err_msg = 'Modify needs field value and position in request %s' %(
                    json.dumps(req_param))
                raise cfgm_common.exceptions.HttpError(400, err_msg)
            elif ((req_oper == 'delete') and field_pos is None):
                err_msg = 'Delete needs field position in request %s' %(
                    json.dumps(req_param))
                raise cfgm_common.exceptions.HttpError(400, err_msg)
        elif prop_coll_type == 'map':
            if req_oper not in ('set', 'delete'):
                err_msg = 'Unsupported operation %s in request %s' %(
                    req_oper, json.dumps(req_param))
                raise cfgm_common.exceptions.HttpError(400, err_msg)
            if ((req_oper == 'set') and field_val is None):
                err_msg = 'Set needs field value in request %s' %(
                    json.dumps(req_param))
                # Fix: the original built this message but never raised,
                # silently accepting an invalid 'set' operation.
                raise cfgm_common.exceptions.HttpError(400, err_msg)
            elif ((req_oper == 'delete') and field_pos is None):
                err_msg = 'Delete needs field position in request %s' %(
                    json.dumps(req_param))
                raise cfgm_common.exceptions.HttpError(400, err_msg)

    # Get actual resource from DB
    fields = r_class.prop_fields | r_class.ref_fields
    try:
        ok, result = self._db_conn.dbe_read(obj_type, obj_uuid,
                                            obj_fields=fields)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    except Exception:
        ok = False
        result = cfgm_common.utils.detailed_traceback()
    if not ok:
        self.config_object_error(
            obj_uuid, None, obj_type, 'prop_collection_update', result[1])
        raise cfgm_common.exceptions.HttpError(result[0], result[1])
    db_obj_dict = result

    # Look if the resource have a pending version, if yes use it as resource
    # to update
    if hasattr(r_class, 'get_pending_resource'):
        ok, result = r_class.get_pending_resource(db_obj_dict, fields)
        if ok and isinstance(result, dict):
            db_obj_dict = result
            obj_uuid = db_obj_dict['uuid']
        if not ok and result[0] != 404:
            self.config_object_error(obj_uuid, None, obj_type,
                                     'prop_collection_update', result[1])
            raise cfgm_common.exceptions.HttpError(result[0], result[1])

    self._put_common('prop-collection-update', obj_type, obj_uuid,
                     db_obj_dict,
                     req_prop_coll_updates=request_params.get('updates'))
# end prop_collection_http_post
def ref_update_http_post(self):
    """POST handler adding or deleting a reference between two objects.

    Body carries type/uuid of the referrer, ref-type plus ref-uuid or
    ref-fq-name of the target, the operation (ADD/DELETE), an optional
    attr payload and a relax_ref_for_delete flag.  The updated ref list
    is pushed through the common put path so type hooks and extensions
    run.  Returns {'uuid': <referrer uuid>}.  Raises HttpError 400 on
    invalid input and 404 when either side cannot be resolved.
    """
    # grab fields
    type = get_request().json.get('type')
    res_type, res_class = self._validate_resource_type(type)
    obj_uuid = get_request().json.get('uuid')
    ref_type = get_request().json.get('ref-type')
    ref_field = '%s_refs' %(ref_type.replace('-', '_'))
    ref_res_type, ref_class = self._validate_resource_type(ref_type)
    operation = get_request().json.get('operation')
    ref_uuid = get_request().json.get('ref-uuid')
    ref_fq_name = get_request().json.get('ref-fq-name')
    attr = get_request().json.get('attr')
    relax_ref_for_delete = get_request().json.get('relax_ref_for_delete', False)

    # validate fields
    if None in (res_type, obj_uuid, ref_res_type, operation):
        err_msg = 'Bad Request: type/uuid/ref-type/operation is null: '
        err_msg += '%s, %s, %s, %s.' \
            %(res_type, obj_uuid, ref_res_type, operation)
        raise cfgm_common.exceptions.HttpError(400, err_msg)

    operation = operation.upper()
    if operation not in ['ADD', 'DELETE']:
        err_msg = 'Bad Request: operation should be add or delete: %s' \
            %(operation)
        raise cfgm_common.exceptions.HttpError(400, err_msg)

    if not ref_uuid and not ref_fq_name:
        err_msg = 'Bad Request: ref-uuid or ref-fq-name must be specified'
        raise cfgm_common.exceptions.HttpError(400, err_msg)

    obj_type = res_class.object_type
    ref_obj_type = ref_class.object_type
    if not ref_uuid:
        try:
            ref_uuid = self._db_conn.fq_name_to_uuid(ref_obj_type, ref_fq_name)
        except NoIdError:
            raise cfgm_common.exceptions.HttpError(
                404, 'Name ' + pformat(ref_fq_name) + ' not found')
    elif operation == 'ADD':
        # if UUID provided verify existence of the reference being added
        try:
            ref_fq_name = self._db_conn.uuid_to_fq_name(ref_uuid)
        except NoIdError as e:
            raise cfgm_common.exceptions.HttpError(404, str(e))

    # To invoke type specific hook and extension manager
    fields = res_class.prop_fields | res_class.ref_fields
    try:
        ok, result = self._db_conn.dbe_read(obj_type, obj_uuid, fields)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    except Exception:
        ok = False
        result = cfgm_common.utils.detailed_traceback()
    if not ok:
        self.config_object_error(obj_uuid, None, obj_type, 'ref_update',
                                 result[1])
        raise cfgm_common.exceptions.HttpError(result[0], result[1])
    db_obj_dict = result

    # Look if the resource have a pending version, if yes use it as resource
    # to update
    if hasattr(res_class, 'get_pending_resource'):
        ok, result = res_class.get_pending_resource(db_obj_dict, fields)
        if ok and isinstance(result, dict):
            db_obj_dict = result
            obj_uuid = db_obj_dict['uuid']
        if not ok and result[0] != 404:
            self.config_object_error(
                obj_uuid, None, obj_type, 'ref_update', result[1])
            raise cfgm_common.exceptions.HttpError(result[0], result[1])

    obj_dict = {'uuid': obj_uuid}
    if ref_field in db_obj_dict:
        obj_dict[ref_field] = copy.deepcopy(db_obj_dict[ref_field])

    if operation == 'ADD':
        if ref_obj_type+'_refs' not in obj_dict:
            obj_dict[ref_obj_type+'_refs'] = []
        existing_refs = [ref for ref in obj_dict[ref_obj_type+'_refs']
                         if ref['uuid'] == ref_uuid]
        if existing_refs:
            # Fix: the original assigned through the leaked comprehension
            # variable (``ref['attr'] = attr``) -- a NameError on python3
            # and the wrong (last-seen) ref on python2; update the match.
            existing_refs[0]['attr'] = attr
        else:
            obj_dict[ref_obj_type+'_refs'].append(
                {'to':ref_fq_name, 'uuid': ref_uuid, 'attr':attr})
    elif operation == 'DELETE':
        for old_ref in obj_dict.get(ref_obj_type+'_refs', []):
            if old_ref['to'] == ref_fq_name or old_ref['uuid'] == ref_uuid:
                obj_dict[ref_obj_type+'_refs'].remove(old_ref)
                break

    ref_args = {'ref_obj_type':ref_obj_type, 'ref_uuid': ref_uuid,
                'operation': operation, 'data': {'attr': attr},
                'relax_ref_for_delete': relax_ref_for_delete}
    self._put_common('ref-update', obj_type, obj_uuid, db_obj_dict,
                     req_obj_dict=obj_dict, ref_args=ref_args)

    return {'uuid': obj_uuid}
# end ref_update_http_post
def ref_relax_for_delete_http_post(self):
    """POST handler marking a reference as relaxed so the referred
    object can later be deleted despite the back-reference.

    Body: {'uuid': ..., 'ref-uuid': ...}.  Emits an api-config audit
    log entry for the operation.  Raises HttpError 400 when either
    field is missing and 404 when the uuid is unknown.
    """
    self._post_common(None, {})

    # grab fields
    obj_uuid = get_request().json.get('uuid')
    ref_uuid = get_request().json.get('ref-uuid')

    # validate fields
    if None in (obj_uuid, ref_uuid):
        err_msg = 'Bad Request: Both uuid and ref-uuid should be specified: '
        err_msg += '%s, %s.' %(obj_uuid, ref_uuid)
        raise cfgm_common.exceptions.HttpError(400, err_msg)

    try:
        obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
        self._db_conn.ref_relax_for_delete(obj_uuid, ref_uuid)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'uuid ' + obj_uuid + ' not found')

    apiConfig = VncApiCommon()
    apiConfig.object_type = obj_type
    fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
    apiConfig.identifier_name=':'.join(fq_name)
    apiConfig.identifier_uuid = obj_uuid
    apiConfig.operation = 'ref-relax-for-delete'
    try:
        body = json.dumps(get_request().json)
    except Exception:
        # Fix: narrowed from a bare ``except:`` which would also have
        # swallowed SystemExit/KeyboardInterrupt.
        body = str(get_request().json)
    apiConfig.body = body

    self._set_api_audit_info(apiConfig)
    log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
    log.send(sandesh=self._sandesh)

    return {'uuid': obj_uuid}
# end ref_relax_for_delete_http_post
def fq_name_to_id_http_post(self):
    """Resolve a resource type + fq_name to its uuid.

    Expects a JSON body with 'type' and 'fq_name'. For projects that
    are missing from the config DB, the resourceApi extension is given
    a chance to create them on demand before the lookup is retried.

    Returns {'uuid': <uuid>}; raises HttpError 404 when the name is
    unknown and propagates permission errors from the RBAC check.
    """
    self._post_common(None, {})
    # avoid shadowing the builtins `type` and `id` (original code did)
    req_type = get_request().json.get('type')
    res_type, r_class = self._validate_resource_type(req_type)
    obj_type = r_class.object_type
    fq_name = get_request().json['fq_name']

    try:
        obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
    except NoIdError:
        if obj_type == 'project':
            # Project may exist in keystone but not yet be synced into
            # the config DB; let the extension create it, then retry.
            try:
                self._extension_mgrs['resourceApi'].map_method(
                    'pre_%s_read_fqname' %(obj_type), fq_name)
                obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
            except Exception:
                raise cfgm_common.exceptions.HttpError(
                    404, 'Name ' + pformat(fq_name) + ' not found')
        else:
            raise cfgm_common.exceptions.HttpError(
                404, 'Name ' + pformat(fq_name) + ' not found')

    # ensure user has access to this id; use get_request() for
    # consistency with the rest of the API handlers (was bottle.request)
    ok, result = self._permissions.check_perms_read(get_request(), obj_uuid)
    if not ok:
        err_code, err_msg = result
        raise cfgm_common.exceptions.HttpError(err_code, err_msg)

    return {'uuid': obj_uuid}
# end fq_name_to_id_http_post
def id_to_fq_name_http_post(self):
    """Resolve a uuid to its fq_name and resource type.

    Expects a JSON body with 'uuid'; returns
    {'fq_name': [...], 'type': <resource-type>}.
    """
    self._post_common(None, {})
    obj_uuid = get_request().json['uuid']

    # caller must be allowed to read the object before we reveal its name
    allowed, perms_info = self._permissions.check_perms_read(
        get_request(), obj_uuid)
    if not allowed:
        err_code, err_msg = perms_info
        raise cfgm_common.exceptions.HttpError(err_code, err_msg)

    try:
        fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'UUID ' + obj_uuid + ' not found')

    obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
    return {
        'fq_name': fq_name,
        'type': self.get_resource_class(obj_type).resource_type,
    }
# end id_to_fq_name_http_post
# Enables a user-agent to store and retrieve key-val pair
# TODO this should be done only for special/quantum plugin
def useragent_kv_http_post(self):
    """Store/retrieve/delete a user-agent key-value pair.

    Expects a JSON body with 'operation' (STORE/RETRIEVE/DELETE),
    'key' and optionally 'value'. RETRIEVE returns {'value': ...}.
    """
    self._post_common(None, {})

    params = get_request().json
    oper = params.get('operation')
    if oper is None:
        err_msg = ("Error: Key/value store API needs 'operation' "
                   "parameter")
        raise cfgm_common.exceptions.HttpError(400, err_msg)
    if 'key' not in params:
        err_msg = ("Error: Key/value store API needs 'key' parameter")
        raise cfgm_common.exceptions.HttpError(400, err_msg)

    key = params.get('key')
    val = params.get('value', '')

    # TODO move values to common
    if oper == 'STORE':
        self._db_conn.useragent_kv_store(key, val)
    elif oper == 'RETRIEVE':
        try:
            return {'value': self._db_conn.useragent_kv_retrieve(key)}
        except NoUserAgentKey:
            raise cfgm_common.exceptions.HttpError(
                404, "Unknown User-Agent key " + key)
    elif oper == 'DELETE':
        self._db_conn.useragent_kv_delete(key)
    else:
        raise cfgm_common.exceptions.HttpError(
            404, "Invalid Operation " + oper)
# end useragent_kv_http_post
def db_check(self):
    """Check database for inconsistencies; read-only, no updates."""
    inconsistencies = self._db_conn.db_check()
    return {'results': inconsistencies}
# end db_check
def fetch_records(self):
    """Retrieve and return all records from the database."""
    records = self._db_conn.db_read()
    return {'results': records}
# end fetch_records
def start_profile(self):
    # Profiling hook; GreenletProfiler integration is currently disabled,
    # so this is intentionally a no-op.
    #GreenletProfiler.start()
    pass
# end start_profile
def stop_profile(self):
    # Profiling hook; GreenletProfiler integration is currently disabled,
    # so this is intentionally a no-op. The disabled code below would
    # capture the stats into self._profile_info.
    pass
    #GreenletProfiler.stop()
    #stats = GreenletProfiler.get_func_stats()
    #self._profile_info = stats.print_all()

    #return self._profile_info
# end stop_profile
def get_profile_info(self):
    # Return the profiling output; with profiling disabled (see
    # stop_profile) this may be unset/None — TODO confirm callers cope.
    return self._profile_info
# end get_profile_info
def get_resource_class(self, type_str):
    """Return the server-side resource class for a type name.

    Accepts either naming convention — object_type ('virtual_network')
    or resource_type ('virtual-network') — and caches the result under
    both keys. Raises TypeError for unknown type names.
    """
    if type_str in self._resource_classes:
        return self._resource_classes[type_str]

    common_name = cfgm_common.utils.CamelCase(type_str)
    server_name = '%sServer' % common_name
    try:
        # prefer an explicitly hand-written <Type>Server class
        resource_class = getattr(vnc_cfg_types, server_name)
    except AttributeError:
        common_class = cfgm_common.utils.str_to_class(common_name,
                                                      __name__)
        if common_class is None:
            raise TypeError('Invalid type: ' + type_str)
        # Create Placeholder classes derived from Resource, <Type> so
        # resource_class methods can be invoked in CRUD methods without
        # checking for None
        resource_class = type(
            str(server_name),
            (vnc_cfg_types.Resource, common_class, object),
            {})
    resource_class.server = self
    # cache under both the object_type and resource_type spellings
    self._resource_classes[resource_class.object_type] = resource_class
    self._resource_classes[resource_class.resource_type] = resource_class
    return resource_class
# end get_resource_class
def list_bulk_collection_http_post(self):
    """List collection when requested ids don't fit in query params.

    The POST body mirrors the GET list query parameters: 'type',
    comma-separated 'parent_id'/'back_ref_id'/'obj_uuids', the
    count/detail/shared booleans, 'filters', 'fields', 'exclude_hrefs'
    and pagination ('page_marker'/'page_limit'). Delegates to
    _list_collection().
    """
    # avoid shadowing the builtin `type` (original code did)
    req_type = get_request().json.get('type')  # e.g. virtual-network
    resource_type, r_class = self._validate_resource_type(req_type)

    try:
        parent_uuids = get_request().json['parent_id'].split(',')
    except KeyError:
        parent_uuids = None

    try:
        back_ref_uuids = get_request().json['back_ref_id'].split(',')
    except KeyError:
        back_ref_uuids = None

    try:
        obj_uuids = get_request().json['obj_uuids'].split(',')
    except KeyError:
        obj_uuids = None

    is_count = get_request().json.get('count', False)
    is_detail = get_request().json.get('detail', False)
    include_shared = get_request().json.get('shared', False)

    try:
        filters = utils.get_filters(get_request().json.get('filters'))
    except Exception:
        # Use %s formatting so a non-string 'filters' payload still
        # yields the intended 400 instead of a TypeError from string
        # concatenation.
        raise cfgm_common.exceptions.HttpError(
            400, 'Invalid filter %s' % get_request().json.get('filters'))

    req_fields = get_request().json.get('fields', [])
    if req_fields:
        req_fields = req_fields.split(',')

    exclude_hrefs = get_request().json.get('exclude_hrefs', False)

    pagination = {}
    if 'page_marker' in get_request().json:
        pagination['marker'] = self._validate_page_marker(
            get_request().json['page_marker'])
    if 'page_limit' in get_request().json:
        pagination['limit'] = self._validate_page_limit(
            get_request().json['page_limit'])

    return self._list_collection(r_class.object_type, parent_uuids,
                                 back_ref_uuids, obj_uuids, is_count,
                                 is_detail, filters, req_fields,
                                 include_shared, exclude_hrefs,
                                 pagination)
# end list_bulk_collection_http_post
# Private Methods
def _parse_args(self, args_str):
    '''
    Eg. python vnc_cfg_api_server.py --cassandra_server_list
                                         10.1.2.3:9160 10.1.2.4:9160
                                     --redis_server_ip 127.0.0.1
                                     --redis_server_port 6382
                                     --collectors 127.0.0.1:8086
                                     --http_server_port 8090
                                     --listen_ip_addr 127.0.0.1
                                     --listen_port 8082
                                     --admin_port 8095
                                     --region_name RegionOne
                                     --log_local
                                     --log_level SYS_DEBUG
                                     --logging_level DEBUG
                                     --logging_conf <logger-conf-file>
                                     --log_category test
                                     --log_file <stdout>
                                     --trace_file /var/log/contrail/vnc_openstack.err
                                     --use_syslog
                                     --syslog_facility LOG_USER
                                     --worker_id 1
                                     --rabbit_max_pending_updates 4096
                                     --rabbit_health_check_interval 120.0
                                     --cluster_id <testbed-name>
                                     [--auth keystone]
                                     [--default_encoding ascii ]
                                     --object_cache_size 10000
                                     --object_cache_exclude_types ''
                                     --max_request_size 1024000
    '''
    # Delegate to the shared parser; the second element of the returned
    # tuple (remaining/unparsed args) is intentionally discarded.
    self._args, _ = utils.parse_args(args_str)
# end _parse_args
# sigchld handler is currently not engaged. See comment @sigchld
def sigchld_handler(self):
    # Re-establish DB connectivity and re-seed default entries after a
    # child exits. Currently not registered as a signal handler (see the
    # note above this method).
    # DB interface initialization
    self._db_connect(reset_config=False)
    self._db_init_entries()
# end sigchld_handler
def sigterm_handler(self):
    """Terminate the process on SIGTERM.

    Raises SystemExit directly instead of calling the site-provided
    exit() helper, which is not guaranteed to exist (e.g. when the
    interpreter runs with -S). Behavior is identical: SystemExit with
    a None code (exit status 0).
    """
    raise SystemExit()
# sighup handler for applying new configs
def sighup_handler(self):
    # Re-read the config file on SIGHUP; if the collector list changed
    # (detected via an md5 checksum of the concatenated entries),
    # reshuffle it and reconnect sandesh for load-balancing.
    if self._args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read(self._args.conf_file)
        if 'DEFAULTS' in config.sections():
            try:
                collectors = config.get('DEFAULTS', 'collectors')
                if type(collectors) is str:
                    collectors = collectors.split()
                    new_chksum = hashlib.md5("".join(collectors)).hexdigest()
                    if new_chksum != self._chksum:
                        self._chksum = new_chksum
                        self._random_collectors = random.sample(collectors, len(collectors))
                    # Reconnect to achieve load-balance irrespective of list
                    self._sandesh.reconfig_collectors(self._random_collectors)
            except ConfigParser.NoOptionError as e:
                # 'collectors' option absent: keep the current connection
                pass
# end sighup_handler
def _load_extensions(self):
    """Load the stevedore extension managers (resync, resourceApi,
    neutronApi). Failures are logged at SYS_ERR but are not fatal.
    """
    try:
        conf_sections = self._args.config_sections
        # resync/resourceApi extensions are only meaningful when an
        # auth backend is configured
        if self._args.auth != 'no-auth':
            self._extension_mgrs['resync'] = ExtensionManager(
                'vnc_cfg_api.resync', api_server_ip=self._args.listen_ip_addr,
                api_server_port=self._args.listen_port,
                conf_sections=conf_sections, sandesh=self._sandesh)
            self._extension_mgrs['resourceApi'] = ExtensionManager(
                'vnc_cfg_api.resourceApi',
                propagate_map_exceptions=True,
                api_server_ip=self._args.listen_ip_addr,
                api_server_port=self._args.listen_port,
                conf_sections=conf_sections, sandesh=self._sandesh)
        self._extension_mgrs['neutronApi'] = ExtensionManager(
            'vnc_cfg_api.neutronApi',
            api_server_ip=self._args.listen_ip_addr,
            api_server_port=self._args.listen_port,
            conf_sections=conf_sections, sandesh=self._sandesh,
            api_server_obj=self)
    except Exception as e:
        err_msg = cfgm_common.utils.detailed_traceback()
        self.config_log("Exception in extension load: %s" %(err_msg),
                        level=SandeshLevel.SYS_ERR)
# end _load_extensions
def _db_connect(self, reset_config):
    """Create the VncDbClient (cassandra + zookeeper + rabbit) from the
    parsed server arguments and store it on self._db_conn.

    :param reset_config: passed through to VncDbClient; when true the
        backend config state is reset.
    """
    cass_server_list = self._args.cassandra_server_list
    zk_server = self._args.zk_server_ip
    rabbit_servers = self._args.rabbit_server
    rabbit_port = self._args.rabbit_port
    rabbit_user = self._args.rabbit_user
    rabbit_password = self._args.rabbit_password
    rabbit_vhost = self._args.rabbit_vhost
    rabbit_ha_mode = self._args.rabbit_ha_mode
    cassandra_user = self._args.cassandra_user
    cassandra_password = self._args.cassandra_password
    obj_cache_entries = int(self._args.object_cache_entries)
    # type names arrive in external hyphenated form; internal code uses
    # underscores
    obj_cache_exclude_types = \
        [t.replace('-', '_').strip() for t in
         self._args.object_cache_exclude_types.split(',')]
    debug_obj_cache_types = \
        [t.replace('-', '_').strip() for t in
         self._args.debug_object_cache_types.split(',')]
    db_engine = self._args.db_engine
    self._db_engine = db_engine
    cred = None

    db_server_list = None
    if db_engine == 'cassandra':
        if cassandra_user is not None and cassandra_password is not None:
            # restore the credentials dict (the original line contained a
            # scrubbed, syntactically-invalid placeholder for the password)
            cred = {'username': cassandra_user,
                    'password': cassandra_password}
        db_server_list = cass_server_list

    self._db_conn = VncDbClient(
        self, db_server_list, rabbit_servers, rabbit_port, rabbit_user,
        rabbit_password, rabbit_vhost, rabbit_ha_mode, reset_config,
        zk_server, self._args.cluster_id, db_credential=cred,
        db_engine=db_engine, rabbit_use_ssl=self._args.rabbit_use_ssl,
        kombu_ssl_version=self._args.kombu_ssl_version,
        kombu_ssl_keyfile=self._args.kombu_ssl_keyfile,
        kombu_ssl_certfile=self._args.kombu_ssl_certfile,
        kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs,
        obj_cache_entries=obj_cache_entries,
        obj_cache_exclude_types=obj_cache_exclude_types,
        debug_obj_cache_types=debug_obj_cache_types,
        cassandra_use_ssl=self._args.cassandra_use_ssl,
        cassandra_ca_certs=self._args.cassandra_ca_certs)

    #TODO refactor db connection management.
    self._addr_mgmt._get_db_conn()
# end _db_connect
def _ensure_id_perms_present(self, obj_uuid, obj_dict):
    """Ensure obj_dict carries a complete id_perms block.

    On creation (obj_uuid is None) the defaults are filled in; on
    update, stored values are merged with the request, and only the
    updatable subset of fields is taken from the request.
    """
    # retrieve object and permissions
    id_perms = self._get_default_id_perms()

    if (('id_perms' not in obj_dict) or
            (obj_dict['id_perms'] is None)):
        # Resource creation
        if obj_uuid is None:
            obj_dict['id_perms'] = id_perms
            return
        # update request without id_perms: nothing to merge
        return

    # retrieve the previous version of the id_perms
    # from the database and update the id_perms with
    # them.
    if obj_uuid is not None:
        try:
            old_id_perms = self._db_conn.uuid_to_obj_perms(obj_uuid)
            for field, value in old_id_perms.items():
                if value is not None:
                    id_perms[field] = value
        except NoIdError:
            pass

    # not all fields can be updated
    if obj_uuid:
        field_list = ['enable', 'description']
    else:
        field_list = ['enable', 'description', 'user_visible', 'creator']

    # Start from default and update from obj_dict
    req_id_perms = obj_dict['id_perms']
    for key in field_list:
        if key in req_id_perms:
            id_perms[key] = req_id_perms[key]
    # TODO handle perms present in req_id_perms

    obj_dict['id_perms'] = id_perms
# end _ensure_id_perms_present
def _get_default_id_perms(self, **kwargs):
    """Build the default id_perms structure as a plain dict.

    Starts from the provisioning defaults and applies any keyword
    overrides (e.g. user_visible=False).
    """
    defaults = copy.deepcopy(Provision.defaults.perms)
    # round-trip through JSON to flatten the generated type into dicts
    serialized = json.dumps(
        defaults,
        default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
    perms_dict = json.loads(serialized)
    perms_dict.update(kwargs)
    return perms_dict
# end _get_default_id_perms
def _ensure_perms2_present(self, obj_type, obj_uuid, obj_dict,
                           project_id=None):
    """Ensure obj_dict carries a complete perms2 (ownership/sharing)
    block.

    Ownership is derived, in priority order, from: the project's own
    uuid, an owner already present in the request, the parent object's
    owner, the caller's project_id, or 'cloud-admin'. On update, the
    stored perms2 is merged with the request.
    """
    # retrieve object and permissions
    perms2 = self._get_default_perms2()

    # set ownership of object to creator tenant
    if obj_type == 'project' and 'uuid' in obj_dict:
        perms2['owner'] = str(obj_dict['uuid']).replace('-', '')
    elif obj_dict.get('perms2') and obj_dict['perms2'].get('owner'):
        perms2['owner'] = obj_dict['perms2']['owner']
    elif 'fq_name' in obj_dict and obj_dict['fq_name'][:-1]:
        if 'parent_type' in obj_dict:
            parent_type = obj_dict['parent_type'].replace('-', '_')
        else:
            r_class = self.get_resource_class(obj_type)
            if (len(r_class.parent_types) != 1):
                # fixed typo in message ("permissiosn")
                msg = ("Ambiguous parent to ensure permissions of %s, "
                       "please choose one parent type: %s" %
                       (obj_type, pformat(r_class.parent_types)))
                raise cfgm_common.exceptions.HttpError(400, msg)
            parent_type = r_class.parent_types[0].replace('-', '_')

        if parent_type == 'domain':
            if project_id:
                perms2['owner'] = project_id
            else:
                perms2['owner'] = 'cloud-admin'
        else:
            # inherit ownership from the parent object
            parent_fq_name = obj_dict['fq_name'][:-1]
            parent_uuid = obj_dict.get('parent_uuid')
            try:
                if parent_uuid is None:
                    try:
                        parent_uuid = self._db_conn.fq_name_to_uuid(
                            parent_type, parent_fq_name)
                    except NoIdError:
                        # added missing space after 'Name'
                        raise cfgm_common.exceptions.HttpError(
                            404, 'Name ' + pformat(parent_fq_name) +
                            ' not found')
                ok, parent_obj_dict = self._db_conn.dbe_read(
                    parent_type, parent_uuid, obj_fields=['perms2'])
            except NoIdError as e:
                msg = "Parent %s cannot be found: %s" % (parent_type, str(e))
                raise cfgm_common.exceptions.HttpError(404, msg)
            perms2['owner'] = parent_obj_dict['perms2']['owner']
    elif project_id:
        perms2['owner'] = project_id
    else:
        perms2['owner'] = 'cloud-admin'

    if obj_dict.get('perms2') is None:
        # Resource creation
        if obj_uuid is None:
            obj_dict['perms2'] = perms2
            return
        # Resource already exists
        try:
            obj_dict['perms2'] = self._db_conn.uuid_to_obj_perms2(obj_uuid)
        except NoIdError:
            obj_dict['perms2'] = perms2
        return

    # retrieve the previous version of the perms2 from the database and
    # fold the stored (non-None) values into the defaults
    if obj_uuid is not None:
        try:
            old_perms2 = self._db_conn.uuid_to_obj_perms2(obj_uuid)
            for field, value in old_perms2.items():
                if value is not None:
                    perms2[field] = value
        except NoIdError:
            pass

    # Start from default and update from obj_dict
    req_perms2 = obj_dict['perms2']
    for key in req_perms2:
        perms2[key] = req_perms2[key]
    # TODO handle perms2 present in req_perms2

    obj_dict['perms2'] = perms2

    # ensure is_shared and global_access are consistent
    shared = obj_dict.get('is_shared', None)
    gaccess = obj_dict['perms2'].get('global_access', None)
    if (gaccess is not None and shared is not None and
            shared != (gaccess != 0)):
        # fixed stray token in message ("(%s a)")
        msg = ("Inconsistent is_shared (%s) and global_access (%s)" %
               (shared, gaccess))
        # NOTE(ethuleau): ignore the inconsistency for the moment, as
        # raising would break the Neutron use case where an external
        # network has global access but its 'is_shared' property is False
        # https://review.opencontrail.org/#/q/Id6a0c1a509d7663da8e5bc86f2c7c91c73d420a2
        # Before patch https://review.opencontrail.org/#q,I9f53c0f21983bf191b4c51318745eb348d48dd86,n,z
        # the error was also ignored, since all returned errors of this
        # method were not taken into account.
        # raise cfgm_common.exceptions.HttpError(400, msg)
def _get_default_perms2(self):
    """Build the default perms2 structure as a plain dict from the
    provisioning defaults.
    """
    defaults = copy.deepcopy(Provision.defaults.perms2)
    # round-trip through JSON to flatten the generated type into dicts
    serialized = json.dumps(
        defaults,
        default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))
    return json.loads(serialized)
# end _get_default_perms2
def _db_init_entries(self):
    """Seed the database with the singleton/default config objects
    (global-system-config, default policy resources, fabric and
    link-local networks, default security group, tag types, ...), then
    resync the DB on worker 0 and open up the default ipam.
    """
    # create singleton defaults if they don't exist already in db
    gsc = self.create_singleton_entry(GlobalSystemConfig(
        autonomous_system=64512, config_version=CONFIG_VERSION))
    self._gsc_uuid = gsc.uuid
    gvc = self.create_singleton_entry(GlobalVrouterConfig(
        parent_obj=gsc))
    self.create_singleton_entry(Domain())

    # Global and default policy resources
    pm = self.create_singleton_entry(PolicyManagement())
    self._global_pm_uuid = pm.uuid
    aps = self.create_singleton_entry(ApplicationPolicySet(
        parent_obj=pm, all_applications=True))
    # link the default application policy set to global vrouter config
    ok, result = self._db_conn.ref_update(
        ApplicationPolicySet.object_type,
        aps.uuid,
        GlobalVrouterConfig.object_type,
        gvc.uuid,
        {'attr': None},
        'ADD',
        None,
    )
    if not ok:
        msg = ("Error while referencing global vrouter config %s with the "
               "default global application policy set %s: %s" %
               (gvc.uuid, aps.uuid, result[1]))
        self.config_log(msg, level=SandeshLevel.SYS_ERR)

    # ip-fabric and link-local networks with their default RIs
    ip_fab_vn = self.create_singleton_entry(
        VirtualNetwork(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1],
                       is_provider_network=True))
    self.create_singleton_entry(
        RoutingInstance(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1], ip_fab_vn,
                        routing_instance_is_default=True))
    self.create_singleton_entry(
        RoutingInstance('__default__', ip_fab_vn))
    link_local_vn = self.create_singleton_entry(
        VirtualNetwork(cfgm_common.LINK_LOCAL_VN_FQ_NAME[-1]))
    self.create_singleton_entry(
        RoutingInstance('__link_local__', link_local_vn,
                        routing_instance_is_default=True))

    # specifying alarm kwargs like contrail_alarm.py
    alarm_kwargs = {"alarm_rules":
                    {"or_list" : [
                        {"and_list": [
                            { "operand1": "UveConfigReq.err_info.*.",
                              "operation": "==",
                              "operand2": {"json_value": "True"}
                            } ]
                        } ]
                    },
                    "alarm_severity": 1,
                    "fq_name": [
                        "default-global-system-config",
                        "system-defined-bottle-request-size-limit"
                    ],
                    "id_perms": {
                        "description": "Bottle request size limit exceeded."
                    },
                    "parent_type": "global-system-config",
                    "uve_keys": {
                        "uve_key": [
                            "config-node"
                        ]
                    }
                   }
    self.create_singleton_entry(Alarm(**alarm_kwargs))

    try:
        self.create_singleton_entry(
            RoutingInstance('default-virtual-network',
                            routing_instance_is_default=True))
    except Exception as e:
        self.config_log('error while creating primary routing instance for'
                        'default-virtual-network: ' + str(e),
                        level=SandeshLevel.SYS_NOTICE)

    # Create singleton SG __no_rule__ object for openstack
    domain_obj = Domain(SG_NO_RULE_FQ_NAME[0])
    proj_obj = Project(SG_NO_RULE_FQ_NAME[1], domain_obj)
    sg_rules = PolicyEntriesType()
    id_perms = IdPermsType(enable=True,
                           description="Security group with no rules",
                           user_visible=True)
    perms2 = PermType2(owner='cloud-admin')
    perms2.set_global_access(PERMS_RX)
    sg_obj = SecurityGroup(name=SG_NO_RULE_NAME,
                           parent_obj=proj_obj,
                           security_group_entries=sg_rules.exportDict(''),
                           id_perms=id_perms.exportDict(''),
                           perms2=perms2.exportDict(''))
    self.create_singleton_entry(sg_obj)

    self.create_singleton_entry(DiscoveryServiceAssignment())
    self.create_singleton_entry(GlobalQosConfig())

    # flat-subnet ipam used by service chaining
    sc_ipam_subnet_v4 = IpamSubnetType(subnet=SubnetType('0.0.0.0', 8))
    sc_ipam_subnet_v6 = IpamSubnetType(subnet=SubnetType('::ffff', 104))
    sc_ipam_subnets = IpamSubnets([sc_ipam_subnet_v4, sc_ipam_subnet_v6])
    sc_ipam_obj = NetworkIpam('service-chain-flat-ipam',
        ipam_subnet_method="flat-subnet", ipam_subnets=sc_ipam_subnets)
    self.create_singleton_entry(sc_ipam_obj)

    # Create pre-defined tag-type
    for type_str, type_id in TagTypeNameToId.items():
        type_id_hex = "0x{:04x}".format(type_id)
        tag = TagType(name=type_str, tag_type_id=type_id_hex)
        tag.display_name = type_str
        self.create_singleton_entry(tag, user_visible=False)

    # only the first worker performs the (expensive) resync
    if int(self._args.worker_id) == 0:
        self._db_conn.db_resync()

    #Load init data for job playbooks like JobTemplates, Tags, etc
    if self._args.enable_fabric_ansible:
        self._load_init_data()

    # make default ipam available across tenants for backward compatability
    obj_type = 'network_ipam'
    fq_name = ['default-domain', 'default-project', 'default-network-ipam']
    obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
    (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                            obj_fields=['perms2'])
    obj_dict['perms2']['global_access'] = PERMS_RX
    self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
# end _db_init_entries
# Load init data for job playbooks like JobTemplates, Tags, etc
def _load_init_data(self):
    """
    This function loads init data from a data file specified by the
    argument '--fabric_ansible_dir' to the database. The data file
    must be in JSON format and follow the format below:
    {
      "data": [
        {
          "object_type": "<vnc object type name>",
          "objects": [
            {
              <vnc object payload>
            },
            ...
          ]
        },
        ...
      ]
    }

    Here is an example:
    {
      "data": [
        {
          "object_type": "tag",
          "objects": [
            {
              "fq_name": [
                "fabric=management_ip"
              ],
              "name": "fabric=management_ip",
              "tag_type_name": "fabric",
              "tag_value": "management_ip"
            }
          ]
        }
      ]
    }
    """
    try:
        json_data = self._load_json_data()
        for item in json_data.get("data"):
            object_type = item.get("object_type")

            # Get the class name from object type
            cls_name = cfgm_common.utils.CamelCase(object_type)
            # Get the class object
            cls_ob = cfgm_common.utils.str_to_class(cls_name, __name__)

            # saving the objects to the database
            for obj in item.get("objects"):
                instance_obj = cls_ob(**obj)
                self.create_singleton_entry(instance_obj)
                # update default-global-system-config for supported_device_families
                # NOTE(review): object_type here is the hyphenated form
                # from the data file while the fq-name lookup uses the
                # underscored form — confirm dbe_update accepts it
                if object_type =='global-system-config':
                    fq_name = instance_obj.get_fq_name()
                    uuid = self._db_conn.fq_name_to_uuid('global_system_config', fq_name)
                    self._db_conn.dbe_update(object_type, uuid, obj)

        # create the references described in the optional "refs" section
        for item in json_data.get("refs"):
            from_type = item.get("from_type")
            from_fq_name = item.get("from_fq_name")
            from_uuid = self._db_conn._object_db.fq_name_to_uuid(
                from_type, from_fq_name
            )

            to_type = item.get("to_type")
            to_fq_name = item.get("to_fq_name")
            to_uuid = self._db_conn._object_db.fq_name_to_uuid(
                to_type, to_fq_name
            )

            ok, result = self._db_conn.ref_update(
                from_type,
                from_uuid,
                to_type,
                to_uuid,
                { 'attr': None },
                'ADD',
                None,
            )
    except Exception as e:
        # best-effort load: any failure is logged, not raised
        self.config_log('error while loading init data: ' + str(e),
                        level=SandeshLevel.SYS_NOTICE)
# end Load init data
# Load json data from fabric_ansible_playbooks/conf directory
def _load_json_data(self):
    """Load the predefined fabric-ansible payloads from
    <fabric_ansible_dir>/conf/predef_payloads.json and, for each
    job-template object, inject the input/output schemas read from the
    matching file under <fabric_ansible_dir>/schema/.

    Returns the parsed payload dict.
    """
    # open the json file
    with open(self._args.fabric_ansible_dir +
              '/conf/predef_payloads.json') as data_file:
        input_json = json.load(data_file)

    # Loop through the json, enriching job templates with their schemas
    for item in input_json.get("data"):
        if item.get("object_type") != "job-template":
            continue
        # loop variable renamed from 'object' (shadowed the builtin)
        for tmpl_obj in item.get("objects"):
            fq_name = tmpl_obj.get("fq_name")[-1]
            schema_name = fq_name.replace('template', 'schema.json')
            # open read-only (was 'r+'; the file is never written)
            with open(os.path.join(self._args.fabric_ansible_dir +
                      '/schema/', schema_name), 'r') as schema_file:
                schema_json = json.load(schema_file)
                tmpl_obj["job_template_input_schema"] = schema_json.get(
                    "input_schema")
                tmpl_obj["job_template_output_schema"] = schema_json.get(
                    "output_schema")
    return input_json
# end load json data
# generate default rbac group rule
def _create_default_rbac_rule(self):
    """Ensure the default global api-access-list exists and contains
    all of the built-in RBAC rules; missing rules are appended to an
    existing list, otherwise the list is created from scratch.
    """
    # allow full access to cloud admin
    rbac_rules = [
        {
            'rule_object':'fqname-to-id',
            'rule_field': '',
            'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
        },
        {
            'rule_object':'id-to-fqname',
            'rule_field': '',
            'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
        },
        {
            'rule_object':'useragent-kv',
            'rule_field': '',
            'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
        },
        {
            'rule_object':'documentation',
            'rule_field': '',
            'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
        },
        {
            'rule_object':'/',
            'rule_field': '',
            'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
        },
    ]

    obj_type = 'api_access_list'
    fq_name = ['default-global-system-config', 'default-api-access-list']
    try:
        # ensure global list is not missing any default rules (bug 1642464)
        id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        (ok, obj_dict) = self._db_conn.dbe_read(obj_type, id)
        update_obj = False
        cur_rbac_rules = copy.deepcopy(obj_dict['api_access_list_entries']['rbac_rule'])
        for rule in rbac_rules:
            present = False
            for existing_rule in cur_rbac_rules:
                if rule == existing_rule:
                    present = True
                    # remove so duplicate defaults each match once
                    cur_rbac_rules.remove(existing_rule)
                    break
            if not present:
                obj_dict['api_access_list_entries']['rbac_rule'].append(rule)
                update_obj = True
        if update_obj:
            self._db_conn.dbe_update(obj_type, id, obj_dict)
        return
    except NoIdError:
        # list doesn't exist yet: fall through and create it
        pass

    rge = RbacRuleEntriesType([])
    for rule in rbac_rules:
        rule_perms = [RbacPermType(role_name=p['role_name'], role_crud=p['role_crud']) for p in rule['rule_perms']]
        rbac_rule = RbacRuleType(rule_object=rule['rule_object'],
            rule_field=rule['rule_field'], rule_perms=rule_perms)
        rge.add_rbac_rule(rbac_rule)

    rge_dict = rge.exportDict('')
    glb_rbac_cfg = ApiAccessList(parent_type='global-system-config',
        fq_name=fq_name, api_access_list_entries = rge_dict)

    try:
        self.create_singleton_entry(glb_rbac_cfg)
    except Exception as e:
        err_msg = 'Error creating default api access list object'
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# end _create_default_rbac_rule
def _resync_domains_projects(self, ext):
if hasattr(ext.obj, 'resync_domains_projects'):
ext.obj.resync_domains_projects()
# end _resync_domains_projects
def create_singleton_entry(self, singleton_obj, user_visible=True):
    """Create singleton_obj in the DB if it doesn't already exist.

    Also migrates a legacy cassandra-only fq_name->uuid mapping into
    zookeeper when found. Returns the object with its uuid populated.

    :param singleton_obj: gen-type instance to persist
    :param user_visible: default user_visible flag for id_perms when
        the object doesn't supply its own
    """
    s_obj = singleton_obj
    obj_type = s_obj.object_type
    fq_name = s_obj.get_fq_name()

    # TODO remove backward compat create mapping in zk
    # for singleton START
    try:
        cass_uuid = self._db_conn._object_db.fq_name_to_uuid(obj_type, fq_name)
        try:
            zk_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        except NoIdError:
            # doesn't exist in zookeeper but does so in cassandra,
            # migrate this info to zookeeper
            self._db_conn._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name, str(cass_uuid))
    except NoIdError:
        # doesn't exist in cassandra as well as zookeeper, proceed normal
        pass
    # TODO backward compat END

    # create if it doesn't exist yet
    try:
        s_obj.uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
    except NoIdError:
        obj_json = json.dumps(s_obj, default=_obj_serializer_all)
        obj_dict = json.loads(obj_json)
        # fill in id_perms/perms2 from the object or from defaults
        if s_obj.get_id_perms():
            obj_dict['id_perms'] = s_obj.get_id_perms()
        else:
            obj_dict['id_perms'] = self._get_default_id_perms(
                user_visible=user_visible)
        if s_obj.get_perms2():
            obj_dict['perms2'] = s_obj.get_perms2()
        else:
            obj_dict['perms2'] = self._get_default_perms2()
        (ok, result) = self._db_conn.dbe_alloc(obj_type, obj_dict)
        obj_id = result
        s_obj.uuid = obj_id
        # For virtual networks, allocate an ID
        if obj_type == 'virtual_network':
            vn_id = self.alloc_vn_id(s_obj.get_fq_name_str())
            obj_dict['virtual_network_network_id'] = vn_id
        if obj_type == 'tag':
            obj_dict = self._allocate_tag_id(obj_dict)
        self._db_conn.dbe_create(obj_type, obj_id, obj_dict)
        self.create_default_children(obj_type, s_obj)

    return s_obj
# end create_singleton_entry
# allocate tag id for tag object
def _allocate_tag_id(self, obj_dict):
    """Allocate the tag_id for a tag object and add the tag-type ref.

    The tag id is composed of the tag-type id (hex) and a value id
    allocated in zookeeper. Mutates and returns obj_dict.
    """
    type_str = obj_dict['tag_type_name']
    value_str = obj_dict['tag_value']

    # ensure the tag-type object exists (created invisible to users)
    ok, result = vnc_cfg_types.TagTypeServer.locate(
        [type_str], id_perms=IdPermsType(user_visible=False))
    tag_type = result
    obj_dict['tag_type_refs'] = [
        {
            'uuid': tag_type['uuid'],
            'to': tag_type['fq_name'],
        },
    ]

    # Allocate ID for tag value. Use the all fq_name to distinguish same
    # tag values between global and scoped
    value_id = vnc_cfg_types.TagServer.vnc_zk_client.alloc_tag_value_id(
        type_str, ':'.join(obj_dict['fq_name']))

    # Compose Tag ID with the type ID and value ID
    obj_dict['tag_id'] = "{}{:04x}".format(tag_type['tag_type_id'],
                                           value_id)
    return obj_dict
# end allocate tag id
def _validate_page_marker(self, req_page_marker):
# query params always appears as string
if req_page_marker and req_page_marker.lower() != 'none':
try:
req_page_marker_uuid = req_page_marker.split(':')[-1]
_ = str(uuid.UUID(req_page_marker_uuid))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_marker %s: %s' %(
req_page_marker, e))
else:
req_page_marker = None
return req_page_marker
# end _validate_page_marker
def _validate_page_limit(self, req_page_limit):
try:
val = int(req_page_limit)
if val <= 0:
raise Exception("page_limit has to be greater than zero")
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_limit %s: %s' %(
req_page_limit, e))
return int(req_page_limit)
# end _validate_page_limit
def _list_collection(self, obj_type, parent_uuids=None,
                     back_ref_uuids=None, obj_uuids=None,
                     is_count=False, is_detail=False, filters=None,
                     req_fields=None, include_shared=False,
                     exclude_hrefs=False, pagination=None):
    """Core list implementation shared by GET list and bulk POST.

    Pages through dbe_list results (when pagination carries a
    'marker'), applies per-object permission/visibility filtering for
    non-admin callers, and shapes the response dicts (count vs detail
    vs summary). NOTE: pagination is expected to be a dict; callers in
    this file always pass one — TODO confirm no caller passes None.
    """
    resource_type, r_class = self._validate_resource_type(obj_type)

    is_admin = self.is_admin_request()
    if is_admin:
        field_names = req_fields
    else:
        # non-admin listing needs id_perms for visibility filtering
        field_names = [u'id_perms'] + (req_fields or [])

    if is_count and is_admin:
        ret_result = 0
    else:
        ret_result = []

    page_filled = False
    if 'marker' in pagination:
        # if marker is None, start scanning from uuid 0
        page_start = pagination['marker'] or '0'
        if 'limit' in pagination:
            page_count = pagination['limit']
        else:
            page_count = self._args.paginate_count
    else:
        page_start = None  # cookie to start next search
        page_count = None  # remainder count to finish page

    (ok, result) = r_class.pre_dbe_list(obj_uuids, self._db_conn)
    if not ok:
        (code, msg) = result
        raise cfgm_common.exceptions.HttpError(code, msg)

    while not page_filled:
        (ok, result, ret_marker) = self._db_conn.dbe_list(obj_type,
                             parent_uuids, back_ref_uuids, obj_uuids, is_count and is_admin,
                             filters, is_detail=is_detail, field_names=field_names,
                             include_shared=include_shared,
                             paginate_start=page_start,
                             paginate_count=page_count)
        if not ok:
            self.config_object_error(None, None, '%ss' %(obj_type),
                                     'dbe_list', result)
            raise cfgm_common.exceptions.HttpError(404, result)

        # If only counting, return early
        if is_count and is_admin:
            ret_result += result
            return {'%ss' %(resource_type): {'count': ret_result}}

        allowed_fields = ['uuid', 'href', 'fq_name'] + (req_fields or [])
        obj_dicts = []
        if is_admin:
            # admin: no per-object permission filtering
            for obj_result in result:
                if not exclude_hrefs:
                    obj_result['href'] = self.generate_url(
                        resource_type, obj_result['uuid'])
                if is_detail:
                    obj_result['name'] = obj_result['fq_name'][-1]
                    obj_dicts.append({resource_type: obj_result})
                else:
                    obj_dicts.append(obj_result)
        else:
            for obj_result in result:
                id_perms = obj_result.get('id_perms')

                if not id_perms:
                    # It is possible that the object was deleted, but received
                    # an update after that. We need to ignore it for now. In
                    # future, we should clean up such stale objects
                    continue

                if not id_perms.get('user_visible', True):
                    # skip items not authorized
                    continue

                (ok, status) = self._permissions.check_perms_read(
                        get_request(), obj_result['uuid'],
                        obj_result)
                if not ok and status[0] == 403:
                    continue

                obj_dict = {}
                if is_detail:
                    obj_result = self.obj_view(resource_type, obj_result)
                    obj_result['name'] = obj_result['fq_name'][-1]
                    obj_dict.update(obj_result)
                    obj_dicts.append({resource_type: obj_dict})
                else:
                    obj_dict.update(obj_result)
                    # py2: .keys() returns a list, so deleting while
                    # iterating is safe here
                    for key in obj_dict.keys():
                        if not key in allowed_fields:
                            del obj_dict[key]
                    if obj_dict.get('id_perms') and not 'id_perms' in allowed_fields:
                        del obj_dict['id_perms']
                    obj_dicts.append(obj_dict)
                # href is added after append; obj_dict is the same
                # object that was appended, so the mutation is visible
                if not exclude_hrefs:
                    obj_dict['href'] = self.generate_url(resource_type, obj_result['uuid'])
            # end obj_result in result
        # end not admin req

        ret_result.extend(obj_dicts)

        if 'marker' not in pagination:
            page_filled = True
        elif ret_marker is None:  # pagination request and done
            page_filled = True
        else:  # pagination request and partially filled
            page_start = ret_marker
            page_count -= len(result)
            if page_count <= 0:
                page_filled = True
    # end while not page_filled

    (ok, err_msg) = r_class.post_dbe_list(ret_result, self._db_conn)
    if not ok:
        (code, msg) = err_msg
        raise cfgm_common.exceptions.HttpError(code, msg)

    if 'marker' in pagination:  # send next marker along with results
        if is_count:
            return {'%ss' %(resource_type): {'count': len(ret_result)},
                    'marker': ret_marker}
        else:
            return {'%ss' %(resource_type): ret_result,
                    'marker': ret_marker}

    if is_count:
        return {'%ss' %(resource_type): {'count': len(ret_result)}}
    else:
        return {'%ss' %(resource_type): ret_result}
# end _list_collection
def get_db_connection(self):
    # Accessor for the VncDbClient created in _db_connect().
    return self._db_conn
# end get_db_connection
def generate_url(self, resource_type, obj_uuid):
    """Build an absolute href for a resource.

    Uses the scheme/host of the current request when one is available;
    any failure falls back to the server's configured base URL. Script
    tags in the host portion are neutralized (reflected-XSS guard).
    """
    try:
        parts = get_request().urlparts
        host = parts.netloc.replace('<script>', '<!--script>')
        host = host.replace('</script>', '</script-->')
        return '%s://%s/%s/%s' % (parts.scheme, host,
                                  resource_type, obj_uuid)
    except Exception:
        return '%s/%s/%s' % (self._base_url, resource_type, obj_uuid)
# end generate_url
def generate_hrefs(self, resource_type, obj_dict):
    """Return a copy of *obj_dict* augmented with href links.

    Adds 'href' for the object itself, 'parent_href' when parent info
    is present, and an 'href' entry inside every element of the
    children/refs/backrefs lists that exist in the dict. The input
    dict itself is never modified, as it may be a cached object.
    """
    r_class = self.get_resource_class(resource_type)
    result = obj_dict.copy()
    result['href'] = self.generate_url(resource_type, obj_dict['uuid'])
    try:
        result['parent_href'] = self.generate_url(
            obj_dict['parent_type'], obj_dict['parent_uuid'])
    except KeyError:
        # object has no parent
        pass

    def _add_links(field, linked_type):
        # Rebuild the list with an href per entry; a KeyError (field
        # absent, or malformed entry) leaves the field untouched, as
        # in the historical behavior.
        try:
            result[field] = [
                dict(entry, href=self.generate_url(linked_type,
                                                   entry['uuid']))
                for entry in obj_dict[field]]
        except KeyError:
            pass

    for field, info in r_class.children_field_types.items():
        _add_links(field, info[0])
    for field, info in r_class.ref_field_types.items():
        _add_links(field, info[0])
    for field, info in r_class.backref_field_types.items():
        _add_links(field, info[0])
    return result
# end generate_hrefs
def config_object_error(self, id, fq_name_str, obj_type,
                        operation, err_str):
    """Emit a VncApiConfigLog audit record for a failed configuration
    operation on the object identified by *id* / *fq_name_str*.
    """
    audit = VncApiCommon()
    audit.identifier_uuid = id
    audit.identifier_name = fq_name_str
    audit.operation = operation
    if obj_type is not None:
        audit.object_type = obj_type
    if err_str:
        audit.error = "%s:%s" % (obj_type, err_str)
    self._set_api_audit_info(audit)
    VncApiConfigLog(api_log=audit,
                    sandesh=self._sandesh).send(sandesh=self._sandesh)
# end config_object_error
def config_log(self, msg_str, level=SandeshLevel.SYS_INFO):
    """Send *msg_str* to sandesh at the given severity.

    The VncApi* message class is chosen from the level; unknown levels
    default to VncApiError.
    """
    cls_by_level = {
        SandeshLevel.SYS_DEBUG: VncApiDebug,
        SandeshLevel.SYS_INFO: VncApiInfo,
        SandeshLevel.SYS_NOTICE: VncApiNotice,
        SandeshLevel.SYS_ERR: VncApiError,
    }
    msg_cls = cls_by_level.get(level, VncApiError)
    msg = msg_cls(api_msg=msg_str, level=level, sandesh=self._sandesh)
    msg.send(sandesh=self._sandesh)
# end config_log
def _set_api_audit_info(self, apiConfig):
    """Populate *apiConfig* with audit details taken from the current
    bottle request: URL, caller identity headers (keystone), domain
    and, when present, the request body.

    NOTE(review): remote_ip is filled from the 'Host' header, which is
    the server-side host rather than the client address -- confirm
    this is intentional before changing it.
    """
    apiConfig.url = get_request().url
    apiConfig.remote_ip = get_request().headers.get('Host')
    useragent = get_request().headers.get('X-Contrail-Useragent')
    if not useragent:
        useragent = get_request().headers.get('User-Agent')
    apiConfig.useragent = useragent
    apiConfig.user = get_request().headers.get('X-User-Name')
    apiConfig.project = get_request().headers.get('X-Project-Name')
    apiConfig.domain = get_request().headers.get('X-Domain-Name', 'None')
    if apiConfig.domain.lower() == 'none':
        apiConfig.domain = 'default-domain'
    if int(get_request().headers.get('Content-Length', 0)) > 0:
        # The body may not be valid JSON; audit its string form rather
        # than failing. Narrowed from a bare `except:` which also
        # swallowed SystemExit/KeyboardInterrupt.
        try:
            body = json.dumps(get_request().json)
        except Exception:
            body = str(get_request().json)
        apiConfig.body = body
# end _set_api_audit_info
# uuid is parent's for collections
def _get_common(self, request, uuid=None):
# TODO check api + resource perms etc.
if self.is_auth_needed() and uuid:
if isinstance(uuid, list):
for u_id in uuid:
ok, result = self._permissions.check_perms_read(request,
u_id)
if not ok:
return ok, result
else:
return self._permissions.check_perms_read(request, uuid)
return (True, '')
# end _get_common
def _put_common(
        self, api_name, obj_type, obj_uuid, db_obj_dict, req_obj_dict=None,
        req_prop_coll_updates=None, ref_args=None, quota_dict=None):
    """Common handling for resource update operations (PUT, ref-update
    and property-collection updates).

    Runs pre/post extension hooks, validates visibility, properties,
    references and permissions, emits an audit log, then performs the
    DB update inside a stateful context so partial changes can be
    undone on failure.

    :param api_name: operation name for logs ('http_put', 'ref-update', ...)
    :param obj_type: internal object type of the resource
    :param obj_uuid: UUID of the resource being updated
    :param db_obj_dict: object as currently stored in the DB
    :param req_obj_dict: full requested object body (full update), if any
    :param req_prop_coll_updates: property-collection deltas, if any
    :param ref_args: arguments for ref-update operations
    :param quota_dict: NOTE(review): appears unused in this method -- confirm
    :raises cfgm_common.exceptions.HttpError: on validation or DB failure
    """
    obj_fq_name = db_obj_dict.get('fq_name', 'missing-fq-name')
    # ZK and rabbitmq should be functional
    self._ensure_services_conn(
        api_name, obj_type, obj_uuid, obj_fq_name)
    resource_type, r_class = self._validate_resource_type(obj_type)
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_update' %(obj_type), obj_uuid, req_obj_dict)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In pre_%s_update an extension had error for %s' \
                  %(obj_type, req_obj_dict)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
    db_conn = self._db_conn
    # check visibility
    if (not db_obj_dict['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % obj_uuid
        self.config_object_error(obj_uuid, None, obj_type, api_name, result)
        raise cfgm_common.exceptions.HttpError(404, result)
    # properties validator (for collections validation in caller)
    if req_obj_dict is not None:
        ok, result = self._validate_props_in_request(r_class,
                     req_obj_dict, operation='UPDATE')
        if not ok:
            result = 'Bad property in %s: %s' %(api_name, result)
            raise cfgm_common.exceptions.HttpError(400, result)
    # references validator
    if req_obj_dict is not None:
        ok, result = self._validate_refs_in_request(r_class, req_obj_dict)
        if not ok:
            result = 'Bad reference in %s: %s' %(api_name, result)
            raise cfgm_common.exceptions.HttpError(400, result)
    # common handling for all resource put
    request = get_request()
    fq_name_str = ":".join(obj_fq_name or [])
    if req_obj_dict:
        if ('id_perms' in req_obj_dict and
                req_obj_dict['id_perms'].get('uuid')):
            if not self._db_conn.match_uuid(req_obj_dict, obj_uuid):
                msg = (
                    "UUID mismatch from %s:%s" %
                    (request.environ.get('REMOTE_ADDR',
                                         "Remote address not found"),
                     request.environ.get('HTTP_USER_AGENT',
                                         "User agent not found"))
                )
                self.config_object_error(
                    obj_uuid, fq_name_str, obj_type, 'put', msg)
            # NOTE(review): on mismatch the event is only logged and the
            # uuid in the request is forcibly overwritten -- confirm
            self._db_conn.set_uuid(obj_type, req_obj_dict,
                                   uuid.UUID(obj_uuid),
                                   do_lock=False)
        # Ensure object has at least default permissions set
        self._ensure_id_perms_present(obj_uuid, req_obj_dict)
    apiConfig = VncApiCommon()
    apiConfig.object_type = obj_type
    apiConfig.identifier_name = fq_name_str
    apiConfig.identifier_uuid = obj_uuid
    apiConfig.operation = api_name
    self._set_api_audit_info(apiConfig)
    log = VncApiConfigLog(api_log=apiConfig,
                          sandesh=self._sandesh)
    log.send(sandesh=self._sandesh)
    if self.is_auth_needed():
        ok, result = self._permissions.check_perms_write(request, obj_uuid)
        if not ok:
            (code, msg) = result
            self.config_object_error(
                obj_uuid, fq_name_str, obj_type, api_name, msg)
            raise cfgm_common.exceptions.HttpError(code, msg)
    # Validate perms on references
    if req_obj_dict is not None:
        try:
            self._validate_perms_in_request(
                r_class, obj_type, req_obj_dict)
        except NoIdError:
            raise cfgm_common.exceptions.HttpError(400,
                'Unknown reference in resource update %s %s.'
                %(obj_type, req_obj_dict))
    # State modification starts from here. Ensure that cleanup is done for all state changes
    # NOTE(review): cleanup_on_failure appears unused in this method
    cleanup_on_failure = []
    if req_obj_dict is not None:
        req_obj_dict['uuid'] = obj_uuid
    # Permit abort resource update and return 202 status code
    get_context().set_state('PENDING_DBE_UPDATE')
    ok, result = r_class.pending_dbe_update(db_obj_dict, req_obj_dict,
                                            req_prop_coll_updates)
    if not ok:
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)
    if ok and isinstance(result, tuple) and result[0] == 202:
        # Modifications accepted but not applied, pending update
        # returns 202 HTTP OK code to aware clients
        bottle.response.status = 202
        return True, ''

    def stateful_update():
        # Runs the actual DB mutation; any (False, ...) return or
        # exception triggers the undo path below.
        get_context().set_state('PRE_DBE_UPDATE')
        # type-specific hook
        (ok, result) = r_class.pre_dbe_update(
            obj_uuid, obj_fq_name, req_obj_dict or {}, self._db_conn,
            prop_collection_updates=req_prop_coll_updates)
        if not ok:
            return (ok, result)
        attr_to_publish = None
        if isinstance(result, dict):
            attr_to_publish = result
        get_context().set_state('DBE_UPDATE')
        if api_name == 'ref-update':
            # read ref_update args
            ref_obj_type = ref_args.get('ref_obj_type')
            ref_uuid = ref_args.get('ref_uuid')
            ref_data = ref_args.get('data')
            operation = ref_args.get('operation')
            relax_ref_for_delete = ref_args.get('relax_ref_for_delete', False)
            (ok, result) = db_conn.ref_update(
                obj_type,
                obj_uuid,
                ref_obj_type,
                ref_uuid,
                ref_data,
                operation,
                db_obj_dict['id_perms'],
                attr_to_publish=attr_to_publish,
                relax_ref_for_delete=relax_ref_for_delete
            )
        elif req_obj_dict:
            (ok, result) = db_conn.dbe_update(
                obj_type,
                obj_uuid,
                req_obj_dict,
                attr_to_publish=attr_to_publish,
            )
            # Update quota counter
            if resource_type == 'project' and 'quota' in req_obj_dict:
                proj_id = req_obj_dict['uuid']
                quota_dict = req_obj_dict['quota']
                path_prefix = self._path_prefix + proj_id
                try:
                    QuotaHelper._zk_quota_counter_update(
                        path_prefix, quota_dict, proj_id, db_conn,
                        self.quota_counter)
                except NoIdError:
                    msg = "Error in initializing quota "\
                          "Internal error : Failed to read resource count"
                    self.config_log(msg, level=SandeshLevel.SYS_ERR)
        elif req_prop_coll_updates:
            (ok, result) = db_conn.prop_collection_update(
                obj_type,
                obj_uuid,
                req_prop_coll_updates,
                attr_to_publish=attr_to_publish,
            )
        if not ok:
            return (ok, result)
        get_context().set_state('POST_DBE_UPDATE')
        # type-specific hook
        (ok, result) = r_class.post_dbe_update(
            obj_uuid, obj_fq_name, req_obj_dict or {}, self._db_conn,
            prop_collection_updates=req_prop_coll_updates)
        if not ok:
            return (ok, result)
        return (ok, result)
    # end stateful_update

    try:
        ok, result = stateful_update()
    except Exception as e:
        ok = False
        err_msg = cfgm_common.utils.detailed_traceback()
        result = (500, err_msg)
    if not ok:
        self.undo(result, obj_type, id=obj_uuid)
        # Revert changes made to quota counter by using DB quota dict
        if resource_type == 'project' and 'quota' in req_obj_dict:
            proj_id = db_obj_dict['uuid']
            quota_dict = db_obj_dict.get('quota') or None
            path_prefix = self._path_prefix + proj_id
            try:
                QuotaHelper._zk_quota_counter_update(
                    path_prefix, quota_dict, proj_id, self._db_conn,
                    self.quota_counter)
            except NoIdError:
                err_msg = "Error in rolling back quota count on undo "\
                          "Internal error : Failed to read resource count"
                self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)
    try:
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_update' %(obj_type), obj_uuid,
            req_obj_dict, db_obj_dict)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In post_%s_update an extension had error for %s' \
                  %(obj_type, req_obj_dict)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end _put_common
# parent_type needed for perms check. None for derived objects (eg.
# routing-instance)
def _delete_common(self, request, obj_type, uuid, parent_uuid):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
fq_name = self._db_conn.uuid_to_fq_name(uuid)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = uuid
apiConfig.operation = 'delete'
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# TODO check api + resource perms etc.
if not self.is_auth_needed() or not parent_uuid:
return (True, '')
"""
Validate parent allows write access. Implicitly trust
parent info in the object since coming from our DB.
"""
return self._permissions.check_perms_delete(request, obj_type, uuid,
parent_uuid)
# end _http_delete_common
def _post_validate(self, obj_type=None, obj_dict=None):
if not obj_dict:
return
def _check_field_present(fname):
fval = obj_dict.get(fname)
if not fval:
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, no %s in POST body" %(fname))
return fval
fq_name = _check_field_present('fq_name')
# well-formed name checks
if illegal_xml_chars_RE.search(fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has illegal xml characters")
if obj_type == 'route_target':
invalid_chars = self._INVALID_NAME_CHARS - set(':')
else:
invalid_chars = self._INVALID_NAME_CHARS
if any((c in invalid_chars) for c in fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has one of invalid chars %s"
%(invalid_chars))
# end _post_validate
def validate_parent_type(self, obj_type, obj_dict):
    """Check that obj_dict's parent type (explicit or inferred) is
    allowed for *obj_type* and that the fq_name length matches the
    parent depth. Raises HttpError(400) on any mismatch.
    """
    parent_type = obj_dict.get('parent_type')
    r_class = self.get_resource_class(obj_type)
    allowed_parent_types = r_class.parent_types
    if parent_type:
        if parent_type not in allowed_parent_types:
            raise cfgm_common.exceptions.HttpError(
                400, 'Invalid parent type: %s. Allowed types: %s' % (
                    parent_type, allowed_parent_types))
    elif (len(allowed_parent_types) > 1 and
            'config-root' not in allowed_parent_types):
        # Ambiguous: several possible parents and none is implicit
        raise cfgm_common.exceptions.HttpError(
            400, 'Missing parent type: %s. Allowed types: %s' % (
                parent_type, allowed_parent_types))
    elif len(allowed_parent_types) == 1:
        # Single possible parent: infer it
        parent_type = allowed_parent_types[0]
    if parent_type in ('config-root', None):
        # Root-parented objects carry a single-element fq_name
        if len(obj_dict['fq_name']) != 1:
            raise cfgm_common.exceptions.HttpError(
                400, 'Invalid fq-name of an object with no parent: %s' % (
                obj_dict['fq_name']))
    elif len(obj_dict['fq_name']) < 2:
        # Parented objects need at least parent name + own name
        raise cfgm_common.exceptions.HttpError(
            400, 'Invalid fq-name for object with parent_type %s: %s' % (
            parent_type, obj_dict['fq_name']))
# end validate_parent_type
def _post_common(self, obj_type, obj_dict):
self._ensure_services_conn(
'http_post', obj_type, obj_fq_name=obj_dict.get('fq_name'))
if not obj_dict:
# TODO check api + resource perms etc.
return (True, None)
# Fail if object exists already
try:
obj_uuid = self._db_conn.fq_name_to_uuid(
obj_type, obj_dict['fq_name'])
raise cfgm_common.exceptions.HttpError(
409, '' + pformat(obj_dict['fq_name']) +
' already exists with uuid: ' + obj_uuid)
except NoIdError:
pass
self.validate_parent_type(obj_type, obj_dict)
# Ensure object has at least default permissions set
self._ensure_id_perms_present(None, obj_dict)
self._ensure_perms2_present(obj_type, None, obj_dict,
get_request().headers.environ.get('HTTP_X_PROJECT_ID', None))
# TODO check api + resource perms etc.
uuid_in_req = obj_dict.get('uuid', None)
# Set the display name
if (('display_name' not in obj_dict) or
(obj_dict['display_name'] is None)):
obj_dict['display_name'] = obj_dict['fq_name'][-1]
fq_name_str = ":".join(obj_dict['fq_name'])
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=fq_name_str
apiConfig.identifier_uuid = uuid_in_req
apiConfig.operation = 'post'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
if uuid_in_req:
if uuid_in_req != str(uuid.UUID(uuid_in_req)):
bottle.abort(400, 'Invalid UUID format: ' + uuid_in_req)
try:
fq_name = self._db_conn.uuid_to_fq_name(uuid_in_req)
raise cfgm_common.exceptions.HttpError(
409, uuid_in_req + ' already exists with fq_name: ' +
pformat(fq_name))
except NoIdError:
pass
apiConfig.identifier_uuid = uuid_in_req
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return (True, uuid_in_req)
# end _post_common
def reset(self):
    """Drop internal state tied to in-flight operations by resetting
    the DB connection layer, when one exists.
    """
    db_conn = self._db_conn
    if db_conn:
        db_conn.reset()
# end reset
# allocate block of IP addresses from VN. Subnet info expected in request
# body
def vn_ip_alloc_http_post(self, id):
    """Allocate a block of IP addresses from a virtual network.

    Expected body: {"subnet_list" : "<cidr>", "count" : 4}; 'count'
    defaults to 1, 'subnet' and 'family' are optional.
    Raises HttpError 404 for an unknown VN or undefined subnet, and
    409 when the subnet is exhausted.
    """
    try:
        vn_fq_name = self._db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'Virtual Network ' + id + ' not found!')
    req_dict = get_request().json
    subnet = req_dict.get('subnet')
    count = req_dict.get('count', 1)
    family = req_dict.get('family')
    try:
        return vnc_cfg_types.VirtualNetworkServer.ip_alloc(
            vn_fq_name, subnet, count, family)
    except vnc_addr_mgmt.AddrMgmtSubnetUndefined as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    except vnc_addr_mgmt.AddrMgmtSubnetExhausted as e:
        raise cfgm_common.exceptions.HttpError(409, str(e))
# end vn_ip_alloc_http_post
# free block of ip addresses to subnet
def vn_ip_free_http_post(self, id):
    """Free a block of IP addresses back to a virtual network's subnet.

    Expected body:
    {
        "subnet" : "<cidr>",
        "ip_addr": [ "<ip1>", "<ip2>", ... ]
    }
    Raises HttpError(404) when the virtual network does not exist.
    """
    try:
        vn_fq_name = self._db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'Virtual Network ' + id + ' not found!')
    req_dict = get_request().json
    # Missing 'ip_addr' simply means nothing to free
    ip_list = req_dict.get('ip_addr', [])
    result = vnc_cfg_types.VirtualNetworkServer.ip_free(
        vn_fq_name, ip_list)
    return result
# end vn_ip_free_http_post
# return no. of IP addresses from VN/Subnet
def vn_subnet_ip_count_http_post(self, id):
    """Return the number of allocated IP addresses per subnet of a
    virtual network.

    Expected body: {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"]}
    Raises HttpError 404 for an unknown VN and 500 on read failure.
    """
    try:
        vn_fq_name = self._db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'Virtual Network ' + id + ' not found!')
    req_dict = get_request().json
    # Read the VN only to confirm it is still readable; the returned
    # object itself is not needed (dead `obj_dict` assignment removed).
    try:
        (ok, result) = self._db_conn.dbe_read('virtual_network', id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    except Exception as e:
        ok = False
        result = cfgm_common.utils.detailed_traceback()
    if not ok:
        raise cfgm_common.exceptions.HttpError(500, result)
    subnet_list = req_dict.get('subnet_list', [])
    return vnc_cfg_types.VirtualNetworkServer.subnet_ip_count(
        vn_fq_name, subnet_list)
# end vn_subnet_ip_count_http_post
# check if token validatation needed
def is_auth_needed(self):
    """True unless the server runs with aaa_mode 'no-auth'."""
    return not (self.aaa_mode == 'no-auth')
def is_rbac_enabled(self):
    """True when role-based access control is the active AAA mode."""
    mode = self.aaa_mode
    return mode == 'rbac'
@property
def aaa_mode(self):
    """Current AAA mode string as configured in the server args."""
    mode = self._args.aaa_mode
    return mode
@aaa_mode.setter
def aaa_mode(self, mode):
    """Persist the new AAA mode on the parsed server arguments."""
    setattr(self._args, 'aaa_mode', mode)
# indication if multi tenancy with rbac is enabled or disabled
def aaa_mode_http_get(self):
    """GET handler: report whether multi tenancy with rbac is enabled."""
    mode = self.aaa_mode
    return {'aaa-mode': mode}
def aaa_mode_http_put(self):
    """PUT handler: switch the server's AAA mode.

    Requires a valid token and admin rights; enabling rbac also
    installs the default RBAC rule. Raises ValueError for an unknown
    mode and HttpError for auth failures.
    """
    requested_mode = get_request().json['aaa-mode']
    if requested_mode not in AAA_MODE_VALID_VALUES:
        raise ValueError('Invalid aaa-mode %s' % requested_mode)
    ok, result = self._auth_svc.validate_user_token()
    if not ok:
        code, msg = result
        self.config_object_error(None, None, None, 'aaa_mode_http_put',
                                 msg)
        raise cfgm_common.exceptions.HttpError(code, msg)
    if not self.is_admin_request():
        raise cfgm_common.exceptions.HttpError(403, " Permission denied")
    self.aaa_mode = requested_mode
    if self.is_rbac_enabled():
        self._create_default_rbac_rule()
    return {'aaa-mode': self.aaa_mode}
# end
@property
def cloud_admin_role(self):
    """Keystone role name that grants full cloud-admin privileges."""
    role = self._args.cloud_admin_role
    return role
@property
def global_read_only_role(self):
    """Keystone role name that grants global read-only access."""
    role = self._args.global_read_only_role
    return role
def set_tag(self):
    """REST handler: attach/detach tags on an arbitrary resource.

    Body: {'obj_type': ..., 'obj_uuid': ..., <tag-type>: <attrs>|None}.
    A None body for a tag type removes all refs of that type; otherwise
    'value' / 'add_values' / 'delete_values' drive the ref changes and
    'is_global' selects globally-scoped tags. Raises HttpError 400/404
    on invalid input or unknown tags.
    """
    self._post_common(None, {})
    req_dict = get_request().json
    obj_type = req_dict.pop('obj_type')
    obj_uuid = req_dict.pop('obj_uuid')
    need_update = False
    if obj_type is None or obj_uuid is None:
        msg = "Object type and UUID must be specified"
        raise cfgm_common.exceptions.HttpError(400, msg)
    ok, result = self._db_conn.dbe_read(
        obj_type,
        obj_uuid,
        obj_fields=['parent_type', 'perms2', 'tag_refs'],
    )
    if not ok:
        raise cfgm_common.exceptions.HttpError(*result)
    obj_dict = result

    def _locate_tag(type, value, is_global=False):
        # Resolve the tag's FQ name in the proper scope and return
        # (fq_name, uuid); raises HttpError(404) when it doesn't exist.
        name = type + "=" + value
        # unless global, inherit project id from caller
        if is_global:
            fq_name = [name]
        else:
            fq_name = copy.deepcopy(obj_dict['fq_name'])
            if obj_type == 'project':
                fq_name.append(name)
            elif ('parent_type' in obj_dict and
                    obj_dict['parent_type'] == 'project'):
                fq_name[-1] = name
            elif ('perms2' in obj_dict and
                    is_uuid_like(obj_dict['perms2']['owner'])):
                parent_uuid = str(uuid.UUID(obj_dict['perms2']['owner']))
                try:
                    fq_name = self._db_conn.uuid_to_fq_name(parent_uuid)
                except NoIdError:
                    msg = ("Cannot find %s %s owner" %
                           (obj_type, obj_dict['uuid']))
                    raise cfgm_common.exceptions.HttpError(404, msg)
                fq_name.append(name)
            else:
                msg = ("Not able to determine the scope of the tag '%s'" %
                       name)
                raise cfgm_common.exceptions.HttpError(404, msg)
        # lookup (validate) tag
        try:
            tag_uuid = self._db_conn.fq_name_to_uuid('tag', fq_name)
        except NoIdError:
            msg = "Tag with FQName %s not found" % pformat(fq_name)
            raise cfgm_common.exceptions.HttpError(404, msg)
        return fq_name, tag_uuid

    # Index the existing tag refs by tag type (text before '=')
    refs_per_type = {}
    for ref in obj_dict.get('tag_refs', []):
        ref_type = ref['to'][-1].partition('=')[0]
        refs_per_type.setdefault(ref_type, []).append(ref)
    for tag_type, attrs in req_dict.items():
        tag_type = tag_type.lower()
        # If the body of a Tag type is None, all references to that Tag
        # type are removed on the resource
        if attrs is None:
            for ref in refs_per_type.get(tag_type, []):
                need_update = True
                obj_dict['tag_refs'].remove(ref)
            refs_per_type[tag_type] = []
            continue
        # Else get defined values and update Tag references on the resource
        is_global = attrs.get('is_global', False)
        value = attrs.get('value')
        add_values = set(attrs.get('add_values', []))
        delete_values = set(attrs.get('delete_values', []))
        # Tag type is unique per object, unless
        # TAG_TYPE_NOT_UNIQUE_PER_OBJECT type
        if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
            if add_values or delete_values:
                msg = ("Tag type %s cannot be set multiple times on a "
                       "same object." % tag_type)
                raise cfgm_common.exceptions.HttpError(400, msg)
        # address-group object can only be associated with label
        if (obj_type == 'address_group' and
                tag_type not in TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP):
            msg = ("Invalid tag type %s for object type %s" %
                   (tag_type, obj_type))
            raise cfgm_common.exceptions.HttpError(400, msg)
        # Existing refs of this tag type, indexed by tag value
        refs_per_values = {}
        if tag_type in refs_per_type:
            refs_per_values = {ref['to'][-1].partition('=')[2]: ref for ref
                               in refs_per_type[tag_type]}
        if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
            if value is None or isinstance(value, list):
                msg = "No valid value provided for tag type %s" % tag_type
                raise cfgm_common.exceptions.HttpError(400, msg)
            # don't need to update if tag type with same value already
            # referenced
            if value in refs_per_values:
                continue
            for ref in refs_per_values.values():
                need_update = True
                # object already have a reference to that tag type with a
                # different value, remove it
                obj_dict['tag_refs'].remove(ref)
            # finally, reference the tag type with the new value
            tag_fq_name, tag_uuid = _locate_tag(tag_type, value, is_global)
            obj_dict.setdefault('tag_refs', []).append({
                'uuid': tag_uuid,
                'to': tag_fq_name,
                'attr': None,
            })
            need_update = True
        else:
            # Add 'value' attribute to 'add_values' list if not null
            if value is not None:
                add_values.add(value)
            for add_value in add_values - set(refs_per_values.keys()):
                need_update = True
                tag_fq_name, tag_uuid = _locate_tag(tag_type, add_value,
                                                    is_global)
                obj_dict.setdefault('tag_refs', []).append({
                    'uuid': tag_uuid,
                    'to': tag_fq_name,
                    'attr': None,
                })
            for del_value in delete_values & set(refs_per_values.keys()):
                need_update = True
                obj_dict['tag_refs'].remove(refs_per_values[del_value])
    if need_update:
        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
    return {}
def security_policy_draft(self):
    """REST handler: commit or discard pending (draft) security
    resources for a given scope.

    Body: {'scope_uuid': <uuid>, 'action': 'commit'|'discard'}.
    The operation is serialized against concurrent security changes
    on the same scope via a zookeeper write lock.

    :raises cfgm_common.exceptions.HttpError: 404 for an unknown
        scope, 400 for bad action, disabled draft mode or a busy scope
    """
    self._post_common(None, {})
    req_dict = get_request().json
    scope_uuid = req_dict.pop('scope_uuid')
    action = req_dict.pop('action')
    pm_class = self.get_resource_class('policy-management')
    try:
        scope_type = self._db_conn.uuid_to_obj_type(scope_uuid)
    except NoIdError as e:
        msg = ("Cannot find scope where pending security resource are "
               "own: %s" % str(e))
        # BUG FIX: this error used to be swallowed (msg built but never
        # raised), leaving scope_type unbound and crashing below with a
        # NameError. Report the missing scope to the caller instead.
        raise cfgm_common.exceptions.HttpError(404, msg)
    scope_class = self.get_resource_class(scope_type)
    scope_fq_name = self._db_conn.uuid_to_fq_name(scope_uuid)
    pm_fq_name = [POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]
    if (scope_type == GlobalSystemConfig.object_type and
            scope_fq_name == GlobalSystemConfig().fq_name):
        # Global scope: drafts live under the global policy-management
        parent_type = PolicyManagement.resource_type
        parent_fq_name = PolicyManagement().fq_name
        parent_uuid = self._global_pm_uuid
    else:
        pm_fq_name = scope_fq_name + pm_fq_name
        parent_type = scope_class.resource_type
        parent_fq_name = scope_fq_name
        parent_uuid = scope_uuid
    ok, result = pm_class.locate(
        fq_name=pm_fq_name,
        create_it=False,
        fields=['%ss' % type for type in SECURITY_OBJECT_TYPES],
    )
    if not ok and result[0] == 404:
        # Draft dedicated policy management does not exists, the draft mode
        # is not enabled on the scope
        msg = ("Security draft mode is not enabled on the %s %s (%s)" %
               (scope_type.replace('_', ' ').title(), scope_fq_name,
                scope_uuid))
        raise cfgm_common.exceptions.HttpError(400, msg)
    if not ok:
        raise cfgm_common.exceptions.HttpError(result[0], result[1])
    pm = result
    # Serialize commit/discard against other security changes on this
    # scope; the lock identity embeds host and action for diagnostics
    scope_lock = self._db_conn._zk_db._zk_client.write_lock(
        '%s/%s/%s' % (
            self.security_lock_prefix, scope_type,
            ':'.join(scope_fq_name)
        ),
        'api-server-%s %s' % (socket.gethostname(), action),
    )
    try:
        acquired_lock = scope_lock.acquire(timeout=1)
    except LockTimeout:
        acquired_lock = False
    if acquired_lock:
        try:
            if action == 'commit':
                self._security_commit_resources(scope_type, parent_type,
                                                parent_fq_name,
                                                parent_uuid, pm)
            elif action == 'discard':
                self._security_discard_resources(pm)
            else:
                msg = "Only 'commit' or 'discard' actions are supported"
                raise cfgm_common.exceptions.HttpError(400, msg)
        finally:
            scope_lock.release()
    else:
        # Somebody else holds the lock; surface who/what if possible
        contenders = scope_lock.contenders()
        action_in_progress = '<unknown action>'
        if len(contenders) > 0 and contenders[0]:
            _, _, action_in_progress = contenders[0].partition(' ')
        msg = ("Security resource modifications or commit/discard action "
               "on %s '%s' (%s) scope is under progress. Try again later."
               % (scope_type.replace('_', ' ').title(),
                  ':'.join(scope_fq_name), scope_uuid))
        raise cfgm_common.exceptions.HttpError(400, msg)
    # TODO(ethuleau): we could return some stats or type/uuid resources
    # actions which were done during commit or discard?
    return {}
def _security_commit_resources(self, scope_type, parent_type,
                               parent_fq_name, parent_uuid, pm):
    """Apply all pending (draft) security resources owned by the draft
    policy-management *pm* onto the real parent scope.

    Creates, updates and deletes are first collected, then replayed in
    a dependency-safe order: creates/updates leaf-first, deletes last,
    and temporarily-held back-references re-added at the end.
    """
    updates = []
    deletes = []
    held_refs = []
    for type_name in SECURITY_OBJECT_TYPES:
        r_class = self.get_resource_class(type_name)
        for child in pm.get('%ss' % r_class.object_type, []):
            ok, result = r_class.locate(child['to'], child['uuid'],
                                        create_it=False)
            if not ok:
                continue
            draft = result
            fq_name = parent_fq_name + [child['to'][-1]]
            try:
                uuid = self._db_conn.fq_name_to_uuid(r_class.object_type,
                                                     fq_name)
            except NoIdError:
                # No original version found, new resource created
                uuid = None
            self._holding_backrefs(held_refs, scope_type,
                                   r_class.object_type, fq_name, draft)
            # Purge pending resource as we re-use the same UUID
            self.internal_request_delete(r_class.object_type,
                                         child['uuid'])
            if uuid and draft['draft_mode_state'] == 'deleted':
                # The resource is removed, we can purge original resource
                deletes.append((r_class.object_type, uuid))
            elif uuid and draft['draft_mode_state'] == 'updated':
                # Update original resource with pending resource
                draft.pop('fq_name', None)
                draft.pop('uuid', None)
                draft.pop('draft_mode_state', None)
                if 'id_perms' in draft:
                    draft['id_perms'].pop('uuid', None)
                draft['parent_type'] = parent_type
                draft['parent_uuid'] = parent_uuid
                # if a ref type was purged when the draft mode is enabled,
                # set the ref to an empty list to ensure all refs will be
                # removed when resource will be updated/committed
                for ref_type in r_class.ref_fields:
                    if ref_type not in draft:
                        draft[ref_type] = []
                self._update_fq_name_security_refs(
                    parent_fq_name, pm['fq_name'], type_name, draft)
                updates.append(('update', (r_class.resource_type, uuid,
                                           copy.deepcopy(draft))))
            elif not uuid and draft['draft_mode_state'] == 'created':
                # Create new resource with pending values (re-use UUID)
                draft.pop('id_perms', None)
                draft.pop('perms2', None)
                draft.pop('draft_mode_state', None)
                draft['fq_name'] = fq_name
                draft['parent_type'] = parent_type
                draft['parent_uuid'] = parent_uuid
                self._update_fq_name_security_refs(
                    parent_fq_name, pm['fq_name'], type_name, draft)
                updates.append(('create', (r_class.resource_type,
                                           copy.deepcopy(draft))))
            else:
                # Inconsistent draft state: log and skip rather than fail
                # the whole commit
                msg = (
                    "Try to commit a security resource %s (%s) with "
                    "invalid state '%s'. Ignore it." %
                    (':'.join(draft.get('fq_name', ['FQ name unknown'])),
                     draft.get('uuid', 'UUID unknown'),
                     draft.get('draft_mode_state', 'No draft mode state'))
                )
                self.config_log(msg, level=SandeshLevel.SYS_WARN)
    # Need to create/update leaf resources first as they could be
    # referenced by another create/updated resource (e.g.: FP -> FP)
    updates.reverse()  # order is: AG, SG, FR, FP and APS
    for action, args in updates:
        getattr(self, 'internal_request_%s' % action)(*args)
    # Postpone delete to be sure deleted resource not anymore
    # referenced and delete resource with ref before resource with backref
    for args in deletes:  # order is: APS, FP, FR, SG and AG
        self.internal_request_delete(*args)
    for args, kwargs in held_refs:
        self.internal_request_ref_update(*args, **kwargs)
@staticmethod
def _update_fq_name_security_refs(parent_fq_name, pm_fq_name, res_type,
                                  draft):
    """Rewrite in *draft* any reference FQ names that point into the
    draft policy-management scope (*pm_fq_name*) so that they point at
    the real parent scope instead. Mutates *draft* in place.
    """
    for sec_type in SECURITY_OBJECT_TYPES:
        for ref in draft.get('%s_refs' % sec_type, []):
            to_fq_name = ref['to']
            if to_fq_name[:-1] == pm_fq_name:
                ref['to'] = parent_fq_name + [to_fq_name[-1]]
    if res_type != 'firewall_rule':
        return
    # Firewall rule endpoints embed address-group FQ names as
    # colon-joined strings; translate those too
    for endpoint in [draft.get('endpoint_1', {}),
                     draft.get('endpoint_2', {})]:
        ag_name = endpoint.get('address_group', [])
        if not ag_name:
            continue
        parts = ag_name.split(':')
        if parts[:-1] == pm_fq_name:
            endpoint['address_group'] = ':'.join(
                parent_fq_name + [parts[-1]])
def _holding_backrefs(self, held_refs, scope_type, obj_type, fq_name,
                      obj_dict):
    """For global-scope commits, temporarily detach back-references
    from non-global security resources to the draft object and record
    them in *held_refs* so they can be re-added (pointing at the
    committed FQ name) after the commit completes.

    Mutates *obj_dict* (removes held backrefs) and appends
    (args, kwargs) tuples for internal_request_ref_update to
    *held_refs*.
    """
    backref_fields = {'%s_back_refs' % t for t in SECURITY_OBJECT_TYPES}
    if (scope_type == GlobalSystemConfig().object_type and
            obj_dict['draft_mode_state'] != 'deleted'):
        for backref_field in set(obj_dict.keys()) & backref_fields:
            # strip the '_back_refs' suffix to get the referring type
            backref_type = backref_field[:-10]
            for backref in copy.deepcopy(obj_dict.get(backref_field, [])):
                # if it's a backref to a global resource, leave it as is
                if backref['to'][0] in [PolicyManagement().name,
                        POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
                    continue
                self.internal_request_ref_update(
                    backref_type,
                    backref['uuid'],
                    'DELETE',
                    obj_type,
                    ref_uuid=obj_dict['uuid'],
                )
                # remember how to restore the ref against the committed
                # FQ name once the commit is done
                held_refs.append(
                    ((backref_type, backref['uuid'], 'ADD', obj_type),
                     {
                         'ref_fq_name': fq_name,
                         'attr': backref.get('attr')
                     }
                    )
                )
                obj_dict[backref_field].remove(backref)
def _security_discard_resources(self, pm):
    """Delete every pending (draft) security resource owned by the
    draft policy-management object *pm*.
    """
    for sec_type in SECURITY_OBJECT_TYPES:
        obj_type = self.get_resource_class(sec_type).object_type
        for child in pm.get('%ss' % obj_type, []):
            self.internal_request_delete(obj_type, child['uuid'])
def main(args_str=None, server=None):
    """Run the API server's bottle app until interrupted.

    Wires up SIGTERM/SIGHUP handlers first and always resets server
    state on the way out (including on Ctrl-C).
    """
    vnc_api_server = server
    pipe_start_app = vnc_api_server.get_pipe_start_app()
    listen_ip = vnc_api_server.get_listen_ip()
    listen_port = vnc_api_server.get_server_port()
    # @sigchld: handling of SIG_CHLD is disabled for now, as every
    # keystone request to validate a token sends SIG_CHLD to the API
    # server.
    #hub.signal(signal.SIGCHLD, vnc_api_server.sigchld_handler)
    hub.signal(signal.SIGTERM, vnc_api_server.sigterm_handler)
    hub.signal(signal.SIGHUP, vnc_api_server.sighup_handler)
    if pipe_start_app is None:
        pipe_start_app = vnc_api_server.api_bottle
    try:
        bottle.run(app=pipe_start_app, host=listen_ip, port=listen_port,
                   server=get_bottle_server(server._args.max_requests))
    except KeyboardInterrupt:
        # quietly handle Ctrl-C
        pass
    finally:
        # always cleanup gracefully
        vnc_api_server.reset()
# end main
def server_main(args_str=None):
    """Entry point: enable text-mode cgitb tracebacks and start the
    VncApiServer built from *args_str*.
    """
    vnc_cgitb.enable(format='text')
    api_server = VncApiServer(args_str)
    main(args_str, api_server)
#server_main
# Allow running this module directly as the API server process
if __name__ == "__main__":
    server_main()
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
This is the main module in vnc_cfg_api_server package. It manages interaction
between http/rest, address management, authentication and database interfaces.
"""
from gevent import monkey
monkey.patch_all()
from gevent import hub
# from neutron plugin to api server, the request URL could be large.
# fix the const
import gevent.pywsgi
gevent.pywsgi.MAX_REQUEST_LINE = 65535
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import ConfigParser
import functools
import hashlib
import logging
import logging.config
import signal
import netaddr
import os
import re
import random
import socket
from cfgm_common import jsonutils as json
from provision_defaults import *
import uuid
import copy
from pprint import pformat
from cStringIO import StringIO
from vnc_api.utils import AAA_MODE_VALID_VALUES
# import GreenletProfiler
from cfgm_common import vnc_cgitb
import subprocess
import traceback
from kazoo.exceptions import LockTimeout
from cfgm_common import has_role
from cfgm_common import _obj_serializer_all
from cfgm_common.utils import _DEFAULT_ZK_COUNTER_PATH_PREFIX
from cfgm_common.utils import _DEFAULT_ZK_LOCK_PATH_PREFIX
from cfgm_common import is_uuid_like
from cfgm_common import SG_NO_RULE_FQ_NAME, SG_NO_RULE_NAME, UUID_PATTERN
from cfgm_common.uve.vnc_api.ttypes import VncApiLatencyStats, VncApiLatencyStatsLog
logger = logging.getLogger(__name__)
import time
import requests
import xml.etree.ElementTree as etree
from functools import partial
"""
Following is needed to silence warnings on every request when keystone
auth_token middleware + Sandesh is used. Keystone or Sandesh alone
do not produce these warnings.
Exception AttributeError: AttributeError(
"'_DummyThread' object has no attribute '_Thread__block'",)
in <module 'threading' from '/usr/lib64/python2.7/threading.pyc'> ignored
See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
for more information.
"""
import threading
# Monkey-patch _DummyThread's stop method with a harmless no-op to silence
# the '_DummyThread has no attribute _Thread__block' AttributeError noise
# described in the comment block above (keystone auth middleware + Sandesh).
threading._DummyThread._Thread__stop = lambda x: 42
# Version tag for the configuration data model served by this process.
CONFIG_VERSION = '1.0'
import bottle
import utils
import context
from context import get_request, get_context, set_context, use_context
from context import ApiContext
from context import is_internal_request
import vnc_cfg_types
from vnc_db import VncDbClient
import cfgm_common
from cfgm_common import ignore_exceptions
from cfgm_common.uve.vnc_api.ttypes import VncApiCommon, VncApiConfigLog,\
VncApiDebug, VncApiInfo, VncApiNotice, VncApiError
from cfgm_common.uve.vnc_api.ttypes import FabricJobExecution, FabricJobUve, \
PhysicalRouterJobExecution, PhysicalRouterJobUve
from cfgm_common import illegal_xml_chars_RE
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType,\
NodeTypeNames, INSTANCE_ID_DEFAULT, TagTypeNameToId,\
TAG_TYPE_NOT_UNIQUE_PER_OBJECT, TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP,\
POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT, SECURITY_OBJECT_TYPES
from provision_defaults import Provision
from vnc_quota import *
from vnc_api.gen.resource_xsd import *
from vnc_api.gen.resource_common import *
from vnc_api.gen.vnc_api_client_gen import all_resource_type_tuples
import cfgm_common
from cfgm_common.utils import cgitb_hook
from cfgm_common.rest import LinkObject, hdr_server_tenant
from cfgm_common.exceptions import *
from cfgm_common.vnc_extensions import ExtensionManager
import vnc_addr_mgmt
import vnc_auth
import vnc_auth_keystone
import vnc_perms
import vnc_rbac
from cfgm_common.uve.cfgm_cpuinfo.ttypes import ModuleCpuState, ModuleCpuStateTrace
from cfgm_common.buildinfo import build_info
from cfgm_common.vnc_api_stats import log_api_stats
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
# from gen_py.vnc_api.ttypes import *
import netifaces
from pysandesh.connection_info import ConnectionState
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from sandesh.traces.ttypes import RestApiTrace
from vnc_bottle import get_bottle_server
from cfgm_common.vnc_greenlets import VncGreenlet
# Non-CRUD "action" endpoints exposed by the API server.  Each entry maps a
# URI to the VncApiServer method implementing it; the routes are registered
# in VncApiServer.__new__.  'method' defaults to POST when omitted there.
_ACTION_RESOURCES = [
    {'uri': '/prop-collection-get', 'link_name': 'prop-collection-get',
     'method': 'GET', 'method_name': 'prop_collection_http_get'},
    {'uri': '/prop-collection-update', 'link_name': 'prop-collection-update',
     'method': 'POST', 'method_name': 'prop_collection_http_post'},
    {'uri': '/ref-update', 'link_name': 'ref-update',
     'method': 'POST', 'method_name': 'ref_update_http_post'},
    {'uri': '/ref-relax-for-delete', 'link_name': 'ref-relax-for-delete',
     'method': 'POST', 'method_name': 'ref_relax_for_delete_http_post'},
    {'uri': '/fqname-to-id', 'link_name': 'name-to-id',
     'method': 'POST', 'method_name': 'fq_name_to_id_http_post'},
    {'uri': '/id-to-fqname', 'link_name': 'id-to-name',
     'method': 'POST', 'method_name': 'id_to_fq_name_http_post'},
    {'uri': '/useragent-kv', 'link_name': 'useragent-keyvalue',
     'method': 'POST', 'method_name': 'useragent_kv_http_post'},
    {'uri': '/db-check', 'link_name': 'database-check',
     'method': 'POST', 'method_name': 'db_check'},
    {'uri': '/fetch-records', 'link_name': 'fetch-records',
     'method': 'POST', 'method_name': 'fetch_records'},
    {'uri': '/start-profile', 'link_name': 'start-profile',
     'method': 'POST', 'method_name': 'start_profile'},
    {'uri': '/stop-profile', 'link_name': 'stop-profile',
     'method': 'POST', 'method_name': 'stop_profile'},
    {'uri': '/list-bulk-collection', 'link_name': 'list-bulk-collection',
     'method': 'POST', 'method_name': 'list_bulk_collection_http_post'},
    {'uri': '/obj-perms', 'link_name': 'obj-perms',
     'method': 'GET', 'method_name': 'obj_perms_http_get'},
    {'uri': '/chown', 'link_name': 'chown',
     'method': 'POST', 'method_name': 'obj_chown_http_post'},
    {'uri': '/chmod', 'link_name': 'chmod',
     'method': 'POST', 'method_name': 'obj_chmod_http_post'},
    {'uri': '/aaa-mode', 'link_name': 'aaa-mode',
     'method': 'PUT', 'method_name': 'aaa_mode_http_put'},
    # obj-cache is routed for both GET and POST to the same handler
    {'uri': '/obj-cache', 'link_name': 'obj-cache',
     'method': 'GET', 'method_name': 'dump_cache'},
    {'uri': '/obj-cache', 'link_name': 'obj-cache',
     'method': 'POST', 'method_name': 'dump_cache'},
    {'uri': '/execute-job', 'link_name': 'execute-job',
     'method': 'POST', 'method_name': 'execute_job_http_post'},
]

# Required properties whose absence on CREATE is reported as an error
# (consulted by _check_mandatory_props_list / _validate_props_in_request).
_MANDATORY_PROPS = [
    'loadbalancer_healthmonitor_properties',
]
# Bottle error handlers: return the error body unchanged so clients receive
# the payload raised by the API code instead of bottle's default HTML page.
# These are registered on api_bottle via the error_handler map built in
# VncApiServer.__new__.
def error_400(err):
    return err.body
# end error_400

def error_403(err):
    return err.body
# end error_403

def error_404(err):
    return err.body
# end error_404

def error_405(err):
    return err.body
# end error_405

def error_409(err):
    return err.body
# end error_409

# NOTE(review): unlike the other handlers, 412 is registered with the
# decorator on bottle's default app rather than through the error_handler
# map on api_bottle -- looks inconsistent; confirm this is intentional.
@bottle.error(412)
def error_412(err):
    return err.body
# end error_412

def error_500(err):
    return err.body
# end error_500

def error_503(err):
    return err.body
# end error_503
class VncApiServer(object):
    """
    This is the manager class co-ordinating all classes present in the package
    """
    # Characters never allowed inside object names.
    _INVALID_NAME_CHARS = set(':')
    # Resource types for which a default instance is auto-generated;
    # listed in both underscore and hyphen spellings.
    _GENERATE_DEFAULT_INSTANCE = [
        'namespace',
        'project',
        'virtual_network', 'virtual-network',
        'network_ipam', 'network-ipam',
    ]
def __new__(cls, *args, **kwargs):
    """Create the server object and wire up bottle routing: homepage,
    HTTP-error handlers, generated per-resource CRUD routes, and the
    static action routes listed in _ACTION_RESOURCES."""
    obj = super(VncApiServer, cls).__new__(cls, *args, **kwargs)
    obj.api_bottle = bottle.Bottle()
    obj.route('/', 'GET', obj.homepage_http_get)
    # Map HTTP status codes to the module-level body-returning handlers.
    obj.api_bottle.error_handler = {
            400: error_400,
            403: error_403,
            404: error_404,
            405: error_405,
            409: error_409,
            500: error_500,
            503: error_503,
        }
    cls._generate_resource_crud_methods(obj)
    cls._generate_resource_crud_uri(obj)
    # Register the non-CRUD action endpoints; HTTP method defaults to POST.
    for act_res in _ACTION_RESOURCES:
        http_method = act_res.get('method', 'POST')
        method_name = getattr(obj, act_res['method_name'])
        obj.route(act_res['uri'], http_method, method_name)
    return obj
# end __new__
@classmethod
def _validate_complex_type(cls, dict_cls, dict_body):
    """Validate a complex (generated XSD) property value against its
    class definition, recursing into nested complex attributes.

    Raises ValueError on unknown fields or invalid values.  A None body
    or None field value is accepted as-is.
    """
    if dict_body is None:
        return
    for key, value in dict_body.items():
        if key not in dict_cls.attr_fields:
            raise ValueError('class %s does not have field %s' % (
                str(dict_cls), key))
        attr_type_vals = dict_cls.attr_field_type_vals[key]
        attr_type = attr_type_vals['attr_type']
        restrictions = attr_type_vals['restrictions']
        is_array = attr_type_vals.get('is_array', False)
        if value is None:
            continue
        if is_array:
            if not isinstance(value, list):
                raise ValueError('Field %s must be a list. Received value: %s'
                                 % (key, str(value)))
            values = value
        else:
            # scalar field: validate it as a one-element list
            values = [value]
        if attr_type_vals['is_complex']:
            attr_cls = cfgm_common.utils.str_to_class(attr_type, __name__)
            for item in values:
                if attr_type == 'AllowedAddressPair':
                    # AAP additionally enforces minimum prefix lengths
                    # (LP #1720118); see the dedicated validator.
                    cls._validate_allowed_address_pair_prefix_len(item)
                cls._validate_complex_type(attr_cls, item)
        else:
            simple_type = attr_type_vals['simple_type']
            for item in values:
                cls._validate_simple_type(key, attr_type,
                                          simple_type, item,
                                          restrictions)
# end _validate_complex_type
@classmethod
def _validate_allowed_address_pair_prefix_len(cls, value):
    '''Do not allow configuration of AAP with
       IPv4 prefix length less than 24 and 120 for IPv6.
       LP #1720118

       Only enforced for 'active-standby' address mode.
       Raises ValueError when the prefix is too short.
    '''
    if value['address_mode'] == 'active-standby':
        ip_net_family = netaddr.IPNetwork(value['ip']['ip_prefix']).version
        if ip_net_family == 6 and value['ip']['ip_prefix_len'] < 120:
            # BUGFIX: previous message read "...120 is is not acceptable"
            # (duplicated "is").
            raise ValueError('IPv6 Prefix length lesser than 120'
                             ' is not acceptable')
        if ip_net_family == 4 and value['ip']['ip_prefix_len'] < 24:
            raise ValueError('IPv4 Prefix length lesser than 24'
                             ' is not acceptable')
# end _validate_allowed_address_pair_prefix_len
@classmethod
def _validate_communityattribute_type(cls, value):
    """Validate a BGP community attribute: either a well-known name or
    a 'number:number' pair whose leading ASN does not exceed 65535.
    Raises ValueError otherwise."""
    well_known = ("no-export",
                  "accept-own",
                  "no-advertise",
                  "no-export-subconfed",
                  "no-reoriginate")
    if value in well_known:
        return
    if re.match('[0-9]+:[0-9]+', value) is None:
        raise ValueError('Invalid community format %s. '
                         'Change to \'number:number\''
                         % value)
    asn_parts = value.split(':')
    if int(asn_parts[0]) > 65535:
        raise ValueError('Out of range ASN value %s. '
                         'ASN values cannot exceed 65535.'
                         % value)
@classmethod
def _validate_serviceinterface_type(cls, value):
    """Validate a service interface type.

    Accepts management/left/right or 'other' optionally followed by
    digits; raises ValueError for anything else."""
    if value in ("management", "left", "right"):
        return
    if re.match('other[0-9]*', value) is None:
        raise ValueError('Invalid service interface type %s. '
                         'Valid values are: management|left|right|other[0-9]*'
                         % value)
def validate_execute_job_input_params(self, request_params):
    """Validate an execute-job request payload.

    Ensures a job template is identified by uuid or fq_name (resolving
    the missing form via the DB and writing it back into request_params)
    and that any params.device_list is a list of valid uuid strings.

    Returns the device_list (or None when absent).
    Raises HttpError 400 on malformed input, 404 on unknown template.
    """
    device_list = None
    job_template_id = request_params.get('job_template_id')
    job_template_fq_name = request_params.get('job_template_fq_name')
    if not (job_template_id or job_template_fq_name):
        err_msg = "Either job_template_id or job_template_fq_name" \
                  " required in request"
        raise cfgm_common.exceptions.HttpError(400, err_msg)
    # check if the job template id is a valid uuid
    if job_template_id:
        if self.invalid_uuid(job_template_id):
            msg = 'Invalid job-template uuid type %s. uuid type required' \
                  % job_template_id
            raise cfgm_common.exceptions.HttpError(400, msg)
        try:
            # derive the fq_name form and store it alongside the uuid
            job_template_fqname = self._db_conn.uuid_to_fq_name(
                job_template_id)
            request_params['job_template_fq_name'] = job_template_fqname
        except NoIdError as no_id_exec:
            raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
        except Exception as e:
            msg = "Error while reading job_template_id: " + str(e)
            raise cfgm_common.exceptions.HttpError(400, msg)
    else:
        # check if the job template fqname is a valid fq_name
        try:
            # derive the uuid form and store it alongside the fq_name
            job_template_id = self._db_conn.fq_name_to_uuid(
                "job_template", job_template_fq_name)
            request_params['job_template_id'] = job_template_id
        except NoIdError as no_id_exec:
            raise cfgm_common.exceptions.HttpError(404, str(no_id_exec))
        except Exception as e:
            msg = "Error while reading job_template_fqname: " + str(e)
            raise cfgm_common.exceptions.HttpError(400, msg)
    extra_params = request_params.get('params')
    if extra_params is not None:
        device_list = extra_params.get('device_list')
        if device_list:
            if not isinstance(device_list, list):
                err_msg = "malformed request param: device_list, " \
                          "expects list"
                raise cfgm_common.exceptions.HttpError(400, err_msg)
            for device_id in device_list:
                if not isinstance(device_id, basestring):
                    err_msg = "malformed request param: device_list, " \
                              "expects list of string device_uuids," \
                              " found device_uuid %s" % device_id
                    raise cfgm_common.exceptions.HttpError(400, err_msg)
                # check if the device id passed is a valid uuid
                if self.invalid_uuid(device_id):
                    msg = 'Invalid device uuid type %s.' \
                          ' uuid type required' % device_id
                    raise cfgm_common.exceptions.HttpError(400, msg)
    return device_list
def job_mgr_signal_handler(self, signalnum, frame):
    """SIGCHLD handler for job-manager child processes.

    Reaps the exited job_mgr subprocess, looks up its bookkeeping entry
    in self._job_mgr_running_instances, queries the local analytics API
    for the job's object logs to derive a final SUCCESS/FAILURE status,
    publishes fabric and physical-router job UVEs, and finally removes
    the bookkeeping entry for the reaped pid.
    """
    try:
        # get the child process id that called the signal handler
        pid = os.waitpid(-1, os.WNOHANG)
        signal_var = self._job_mgr_running_instances.get(str(pid[0]))
        if not signal_var:
            self.config_log("job mgr process %s not found in the instance "
                            "map!" % str(pid), level=SandeshLevel.SYS_ERR)
            return
        msg = "Entered job_mgr_signal_handler for: %s" % signal_var
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

        # update job manager execution status uve
        elapsed_time = time.time() - signal_var.get('start_time')
        status = "UNKNOWN"
        # BUGFIX: use '!=' (value equality); the previous
        # "is not '__DEFAULT__'" identity test against a string literal
        # was effectively always true.
        if signal_var.get('fabric_name') != "__DEFAULT__":
            try:
                # read the job object log for a particular job to check if
                # it succeeded or not
                jobObjLog_payload = {
                    'start_time': 'now-%ds' % (elapsed_time),
                    'end_time': 'now',
                    'select_fields': ['MessageTS', 'Messagetype', 'ObjectLog'],
                    'table': 'ObjectJobExecutionTable',
                    'where': [
                        [
                            {
                                'name': 'ObjectId',
                                'value': '%s:SUCCESS' % signal_var.get('exec_id'),
                                'op': 1
                            }
                        ]
                    ]
                }
                url = "http://localhost:8081/analytics/query"
                resp = requests.post(url, json=jobObjLog_payload)
                if resp.status_code == 200:
                    # a SUCCESS-tagged object log exists iff the job passed
                    JobLog = resp.json().get('value')
                    if not JobLog:
                        status = 'FAILURE'
                    else:
                        status = 'SUCCESS'
                else:
                    self.config_log("POST request to query job object log "
                                    "failed with error %s" %
                                    resp.status_code,
                                    level=SandeshLevel.SYS_ERR)
            except (requests.ConnectionError, requests.ConnectTimeout,
                    requests.HTTPError, requests.Timeout) as ex:
                self.config_log("POST request to query job object log "
                                "failed with error %s" % str(ex),
                                level=SandeshLevel.SYS_ERR)

            # send uve irrespective of the job log query
            # success/failure with job status
            job_execution_data = FabricJobExecution(
                name=signal_var.get('fabric_name'),
                job_status=status,
                percentage_completed=100)
            job_execution_uve = FabricJobUve(data=job_execution_data,
                                             sandesh=self._sandesh)
            job_execution_uve.send(sandesh=self._sandesh)
        try:
            # read the last PRouter state for all Prouters of this execution
            payload = {
                'sort': 1,
                'start_time': 'now-%ds' % (elapsed_time),
                'sort_fields': ['MessageTS'],
                'end_time': 'now',
                'select_fields': ['MessageTS', 'Messagetype', 'ObjectLog'],
                'table': 'ObjectJobExecutionTable',
                'where': [
                    [
                        {
                            'name': 'Messagetype',
                            'value': 'PRouterOnboardingLog',
                            'op': 1
                        },
                        {
                            'name': 'ObjectId',
                            'value': '%s' % signal_var.get('exec_id'),
                            'op': 1
                        }
                    ]
                ]
            }
            url = "http://localhost:8081/analytics/query"
            resp = requests.post(url, json=payload)
            if resp.status_code == 200:
                PRouterOnboardingLog = resp.json().get('value')
                for PRObjectLog in PRouterOnboardingLog:
                    # each object log is an XML blob carrying the device
                    # name and its onboarding state
                    resp = PRObjectLog.get('ObjectLog')
                    xmlresp = etree.fromstring(resp)
                    for ele in xmlresp.iter():
                        if ele.tag == 'name':
                            device_fqname = ele.text
                        if ele.tag == 'onboarding_state':
                            onboarding_state = ele.text
                    if device_fqname and onboarding_state:
                        prouter_uve_name = device_fqname + ":" + \
                            signal_var.get('fabric_name')
                        prouter_job_data = PhysicalRouterJobExecution(
                            name=prouter_uve_name,
                            execution_id=signal_var.get('exec_id'),
                            job_start_ts=int(round(signal_var.get('start_time') * 1000)),
                            prouter_state=onboarding_state
                        )
                        prouter_job_uve = PhysicalRouterJobUve(
                            data=prouter_job_data, sandesh=self._sandesh)
                        prouter_job_uve.send(sandesh=self._sandesh)
            else:
                self.config_log("POST request to query Prouter job "
                                "object log failed with error %s" %
                                resp.status_code,
                                level=SandeshLevel.SYS_ERR)
        except (requests.ConnectionError, requests.ConnectTimeout,
                requests.HTTPError, requests.Timeout) as ex:
            self.config_log("POST request to query Prouter job object "
                            "log failed with error %s" % str(ex),
                            level=SandeshLevel.SYS_ERR)
        finally:
            # remove the pid entry of the processed job_mgr process
            del self._job_mgr_running_instances[str(pid[0])]
    except OSError as process_error:
        # BUGFIX: corrected log message typo ("Couldn" -> "Couldn't")
        self.config_log("Couldn't retrieve the child process id. OS call "
                        "returned with error %s" % str(process_error),
                        level=SandeshLevel.SYS_ERR)
def execute_job_http_post(self):
    ''' Payload of execute_job
        job_template_id (Mandatory if no job_template_fq_name): <uuid> of
        the created job_template
        job_template_fq_name (Mandatory if no job_template_id): fqname in
        the format: ["<global-system-config-name>",
                     "<name of the job-template>"]
        input (Type json): Input Schema of the playbook under the
        job_template_id
        params (Type json): Extra_params for the job_manager
        (Eg. device_list)
        E.g. Payload:
        {
            "job_template_id": "<uuid>",
            "params": {
                "device_list": ["<device_uuid1>", "<device_uuid2>", ....
                                "<device_uuidn>"]
            }
        }

        Validates the request, gathers device/fabric data, publishes
        "STARTING" UVEs, spawns the job_mgr subprocess and returns its
        execution id and pid.  Raises HttpError (405 when fabric ansible
        is disabled, 400/404 on bad input, 500 on unexpected failure).
    '''
    try:
        if not self._args.enable_fabric_ansible:
            err_msg = "Fabric ansible job manager is disabled. " \
                      "Please enable it by setting the " \
                      "'enable_fabric_ansible' to True in the conf file"
            raise cfgm_common.exceptions.HttpError(405, err_msg)
        self.config_log("Entered execute-job",
                        level=SandeshLevel.SYS_NOTICE)
        request_params = get_request().json
        msg = "Job Input %s " % json.dumps(request_params)
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

        # validate the input and collect the device uuids, if any
        device_list = self.validate_execute_job_input_params(
            request_params)

        # TODO - pass the job manager config file from api server config
        # read the device object and pass the necessary data to the job
        if device_list:
            self.read_device_data(device_list, request_params)
        else:
            self.read_fabric_data(request_params)

        # generate the job execution id
        execution_id = uuid.uuid4()
        request_params['job_execution_id'] = str(execution_id)

        # get the auth token
        auth_token = get_request().get_header('X-Auth-Token')
        request_params['auth_token'] = auth_token

        # pass the required config args to job manager
        job_args = {'collectors': self._args.collectors,
                    'fabric_ansible_conf_file':
                        self._args.fabric_ansible_conf_file
                    }
        request_params['args'] = json.dumps(job_args)

        fabric_job_name = request_params.get('job_template_fq_name')
        fabric_job_name.insert(0, request_params.get('fabric_fq_name'))
        fabric_job_uve_name = ':'.join(map(str, fabric_job_name))

        # create job manager fabric execution status uve
        # BUGFIX: use '!=' (value equality); the previous
        # "is not '__DEFAULT__'" identity test against a string literal
        # was effectively always true.
        if request_params.get('fabric_fq_name') != "__DEFAULT__":
            job_execution_data = FabricJobExecution(
                name=fabric_job_uve_name,
                execution_id=request_params.get('job_execution_id'),
                job_start_ts=int(round(time.time() * 1000)),
                job_status="STARTING",
                percentage_completed=0.0
            )
            job_execution_uve = FabricJobUve(data=job_execution_data,
                                             sandesh=self._sandesh)
            job_execution_uve.send(sandesh=self._sandesh)

        if device_list:
            # one physical-router UVE per requested device
            for device_id in device_list:
                device_fqname = request_params.get(
                    'device_json').get(device_id).get('device_fqname')
                device_fqname = ':'.join(map(str, device_fqname))
                prouter_uve_name = device_fqname + ":" + \
                    fabric_job_uve_name
                prouter_job_data = PhysicalRouterJobExecution(
                    name=prouter_uve_name,
                    execution_id=request_params.get('job_execution_id'),
                    job_start_ts=int(round(time.time() * 1000))
                )
                prouter_job_uve = PhysicalRouterJobUve(
                    data=prouter_job_data, sandesh=self._sandesh)
                prouter_job_uve.send(sandesh=self._sandesh)

        start_time = time.time()
        signal_var = {
            'fabric_name': fabric_job_uve_name,
            'start_time': start_time,
            'exec_id': request_params.get('job_execution_id')
        }

        # handle process exit signal
        signal.signal(signal.SIGCHLD, self.job_mgr_signal_handler)

        # create job manager subprocess
        job_mgr_path = os.path.dirname(__file__) + "/../job_manager/job_mgr.py"
        job_process = subprocess.Popen(["python", job_mgr_path, "-i",
                                        json.dumps(request_params)],
                                       cwd="/", close_fds=True)

        # remember the child so the SIGCHLD handler can report on it
        self._job_mgr_running_instances[str(job_process.pid)] = signal_var

        self.config_log("Created job manager process. Execution id: %s" %
                        execution_id,
                        level=SandeshLevel.SYS_NOTICE)
        return {'job_execution_id': str(execution_id),
                'job_manager_process_id': str(job_process.pid)}
    except cfgm_common.exceptions.HttpError:
        # propagate HTTP errors unchanged
        raise
    except Exception as e:
        err_msg = "Error while executing job request: %s" % repr(e)
        raise cfgm_common.exceptions.HttpError(500, err_msg)
def read_fabric_data(self, request_params):
    """Derive 'fabric_fq_name' for the job request.

    Priority: input.fabric_uuid (resolved via the DB), then
    input.fabric_fq_name, then the literal "__DEFAULT__" when running the
    device deletion template; otherwise raises HttpError 400.  The result
    is stored in request_params['fabric_fq_name'] as a colon-joined string.
    Raises HttpError 404 for an unknown fabric_uuid.
    """
    if request_params.get('input') is None:
        err_msg = "Missing job input"
        raise cfgm_common.exceptions.HttpError(400, err_msg)
    # get the fabric fq_name from the database if fabric_uuid is provided
    fabric_fq_name = None
    if request_params.get('input').get('fabric_uuid'):
        fabric_uuid = request_params.get('input').get('fabric_uuid')
        try:
            fabric_fq_name = self._db_conn.uuid_to_fq_name(fabric_uuid)
        except NoIdError as e:
            raise cfgm_common.exceptions.HttpError(404, str(e))
    elif request_params.get('input').get('fabric_fq_name'):
        fabric_fq_name = request_params.get('input').get('fabric_fq_name')
    else:
        if "device_deletion_template" in request_params.get(
                'job_template_fq_name'):
            fabric_fq_name = "__DEFAULT__"
        else:
            err_msg = "Missing fabric details in the job input"
            raise cfgm_common.exceptions.HttpError(400, err_msg)
    if fabric_fq_name:
        # NOTE(review): when fabric_fq_name is the plain string
        # "__DEFAULT__" (not a list), this join iterates its characters
        # and produces "_:_:D:E:..." -- looks unintended; confirm before
        # changing, as downstream comparisons depend on the stored value.
        fabric_fq_name_str = ':'.join(map(str, fabric_fq_name))
        request_params['fabric_fq_name'] = fabric_fq_name_str
def read_device_data(self, device_list, request_params):
    """Read each physical-router in device_list from the DB and attach a
    'device_json' map (device uuid -> identity/credential details) to
    request_params; also records the fabric fq_name taken from a device's
    fabric_refs, when present.

    Raises HttpError 404 for an unknown device uuid, 500 on read failure.
    """
    device_data = dict()
    for device_id in device_list:
        db_conn = self._db_conn
        try:
            (ok, result) = db_conn.dbe_read(
                "physical-router", device_id,
                ['physical_router_user_credentials',
                 'physical_router_management_ip', 'fq_name',
                 'physical_router_device_family',
                 'physical_router_vendor_name',
                 'physical_router_product_name',
                 'fabric_refs'])
            if not ok:
                self.config_object_error(device_id, None,
                                         "physical-router ",
                                         'execute_job', result)
                raise cfgm_common.exceptions.HttpError(500, result)
        except NoIdError as e:
            raise cfgm_common.exceptions.HttpError(404, str(e))
        # always-present fields
        device_json = {"device_management_ip": result[
            'physical_router_management_ip']}
        device_json.update({"device_fqname": result['fq_name']})
        # optional fields are copied only when set on the object
        user_cred = result.get('physical_router_user_credentials')
        if user_cred:
            device_json.update({"device_username": user_cred['username']})
            device_json.update({"device_password":
                                user_cred['password']})
        device_family = result.get("physical_router_device_family")
        if device_family:
            device_json.update({"device_family": device_family})
        device_vendor_name = result.get("physical_router_vendor_name")
        if device_vendor_name:
            device_json.update({"device_vendor": device_vendor_name})
        device_product_name = result.get("physical_router_product_name")
        if device_product_name:
            device_json.update({"device_product": device_product_name})
        device_data.update({device_id: device_json})
        # NOTE(review): fabric_fq_name is overwritten by each device that
        # has fabric_refs -- assumes all devices belong to one fabric.
        fabric_refs = result.get('fabric_refs')
        if fabric_refs and len(fabric_refs) > 0:
            fabric_fq_name = result.get('fabric_refs')[0].get('to')
            fabric_fq_name_str = ':'.join(map(str, fabric_fq_name))
            request_params['fabric_fq_name'] = fabric_fq_name_str
    if len(device_data) > 0:
        request_params.update({"device_json": device_data})
@classmethod
def _validate_simple_type(cls, type_name, xsd_type, simple_type, value, restrictions=None):
    """Validate (and possibly coerce) a simple-typed property value.

    Integer types may arrive as strings and are coerced to int; booleans
    must already be bool; CommunityAttribute and ServiceInterfaceType
    strings go through dedicated validators; other strings may be
    restricted to an enumerated set.  Returns the (possibly coerced)
    value, or None when value is None.  Raises ValueError on violation.
    (Python 2: relies on long/basestring builtins.)
    """
    if value is None:
        return
    elif xsd_type in ('unsignedLong', 'integer'):
        if not isinstance(value, (int, long)):
            # If value is not an integer, then try to convert it to integer
            try:
                value = int(value)
            except (TypeError, ValueError):
                raise ValueError('%s: integer value expected instead of %s' %(
                    type_name, value))
        if restrictions:
            # for integers, restrictions carry inclusive [min, max] bounds
            if not (int(restrictions[0]) <= value <= int(restrictions[1])):
                raise ValueError('%s: value must be between %s and %s' %(
                    type_name, restrictions[0], restrictions[1]))
    elif xsd_type == 'boolean':
        if not isinstance(value, bool):
            raise ValueError('%s: true/false expected instead of %s' %(
                type_name, value))
    elif xsd_type == 'string' and simple_type == 'CommunityAttribute':
        cls._validate_communityattribute_type(value)
    elif xsd_type == 'string' and simple_type == 'ServiceInterfaceType':
        cls._validate_serviceinterface_type(value)
    else:
        if not isinstance(value, basestring):
            raise ValueError('%s: string value expected instead of %s' %(
                type_name, value))
        # for strings, restrictions enumerate the allowed values
        if restrictions and value not in restrictions:
            raise ValueError('%s: value must be one of %s' % (
                type_name, str(restrictions)))
    return value
# end _validate_simple_type
def _check_mandatory_props_list(self, prop_name):
    """Return True when prop_name is one of the properties whose absence
    on CREATE must be reported (module-level _MANDATORY_PROPS)."""
    return any(name == prop_name for name in _MANDATORY_PROPS)
# end _check_mandatory_props_list
def _validate_props_in_request(self, resource_class, obj_dict, operation):
    """Validate every schema property present in obj_dict against the
    generated resource class.

    Simple-typed values may be coerced in place (see _validate_simple_type
    writing back into obj_dict); complex-typed values must be dicts.  On
    CREATE, required properties listed in _MANDATORY_PROPS must be set.
    Returns (True, '') on success, else (False, error-message).
    """
    for prop_name in resource_class.prop_fields:
        prop_field_types = resource_class.prop_field_types[prop_name]
        is_simple = not prop_field_types['is_complex']
        prop_type = prop_field_types['xsd_type']
        restrictions = prop_field_types['restrictions']
        simple_type = prop_field_types['simple_type']
        # NOTE(review): computed but not used below -- confirm intent.
        is_list_prop = prop_name in resource_class.prop_list_fields
        is_map_prop = prop_name in resource_class.prop_map_fields
        prop_value = obj_dict.get(prop_name)
        if not prop_value:
            # absent/falsy property: error only when CREATE requires it
            # and it is in the mandatory list
            if operation == 'CREATE' and (
                prop_field_types['required'] == 'required'):
                if self._check_mandatory_props_list(prop_name):
                    err_msg = '%s property is missing' %prop_name
                    return False, err_msg
            continue
        if is_simple:
            try:
                # may coerce (e.g. str -> int) and write back into obj_dict
                obj_dict[prop_name] = self._validate_simple_type(prop_name,
                                          prop_type, simple_type,
                                          prop_value, restrictions)
            except Exception as e:
                err_msg = 'Error validating property ' + str(e)
                return False, err_msg
            else:
                continue
        prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
        if isinstance(prop_value, dict):
            try:
                self._validate_complex_type(prop_cls, prop_value)
            except Exception as e:
                err_msg = 'Error validating property %s value %s ' %(
                    prop_name, prop_value)
                err_msg += str(e)
                return False, err_msg
        else: # complex-type + value isn't dict or wrapped in list or map
            err_msg = 'Error in property %s type %s value of %s ' %(
                prop_name, prop_cls, prop_value)
            return False, err_msg
    # end for all properties
    return True, ''
# end _validate_props_in_request
def _validate_refs_in_request(self, resource_class, obj_dict):
    """Validate the 'attr' payload of every reference present in
    obj_dict against the generated link-attribute class for that
    reference type.

    Returns (True, '') on success, else (False, error-message).
    """
    for ref_field in resource_class.ref_fields:
        link_type_name = list(resource_class.ref_field_types[ref_field])[1]
        if link_type_name == 'None':
            # this reference carries no link attribute to validate
            continue
        link_attr_cls = cfgm_common.utils.str_to_class(link_type_name,
                                                       __name__)
        for one_ref in obj_dict.get(ref_field) or []:
            try:
                self._validate_complex_type(link_attr_cls, one_ref['attr'])
            except Exception as exc:
                err_msg = 'Error validating reference %s value %s ' \
                          %(ref_field, one_ref)
                err_msg += str(exc)
                return False, err_msg
    return True, ''
# end _validate_refs_in_request
def _validate_perms_in_request(self, resource_class, obj_type, obj_dict):
    """Check link permission on every referenced object in obj_dict.

    Each ref is resolved to a uuid (ref_name[:-5] strips the '_refs'
    suffix to obtain the referenced object type) and HttpError is raised
    when the caller lacks link permission on it.
    """
    for ref_name in resource_class.ref_fields:
        for ref in obj_dict.get(ref_name) or []:
            try:
                ref_uuid = ref['uuid']
            except KeyError:
                # no uuid supplied: resolve from the ref's fq_name ('to')
                ref_uuid = self._db_conn.fq_name_to_uuid(ref_name[:-5],
                                                         ref['to'])
            (ok, status) = self._permissions.check_perms_link(
                get_request(), ref_uuid)
            if not ok:
                (code, err_msg) = status
                raise cfgm_common.exceptions.HttpError(code, err_msg)
# end _validate_perms_in_request
def _validate_resource_type(self, type):
    """Resolve a resource-type string to (resource_type, resource_class).

    Raises HttpError 404 when the type is unknown."""
    try:
        res_cls = self.get_resource_class(type)
        return res_cls.resource_type, res_cls
    except TypeError:
        raise cfgm_common.exceptions.HttpError(
            404, "Resource type '%s' not found" % type)
# end _validate_resource_type
def _ensure_services_conn(
        self, api_name, obj_type, obj_uuid=None, obj_fq_name=None):
    """Refuse state-changing operations while backend services are
    degraded.

    Raises HttpError 503 when the zookeeper session is down, and 500
    when too many rabbitmq publishes are pending (threshold taken from
    the rabbit_max_pending_updates config arg).
    """
    # If not connected to zookeeper do not allow operations that
    # causes the state change
    if not self._db_conn._zk_db.is_connected():
        errmsg = 'No connection to zookeeper.'
        fq_name_str = ':'.join(obj_fq_name or [])
        self.config_object_error(
            obj_uuid, fq_name_str, obj_type, api_name, errmsg)
        raise cfgm_common.exceptions.HttpError(503, errmsg)
    # If there are too many pending updates to rabbit, do not allow
    # operations that cause state change
    npending = self._db_conn.dbe_oper_publish_pending()
    if (npending >= int(self._args.rabbit_max_pending_updates)):
        err_str = str(MaxRabbitPendingError(npending))
        raise cfgm_common.exceptions.HttpError(500, err_str)
# end _ensure_services_conn
def undo(self, result, obj_type, id=None, fq_name=None, counter=None, value=0):
    """Run the undo callbacks registered on the current context after a
    failed operation, and log a config error for the failed stage.

    counter/value: when a counter is given, 'counter + value' is applied
    -- presumably a quota counter object whose __add__ performs the
    remote (ZK) update, since the local rebinding alone would be a no-op;
    TODO confirm against the counter implementation.
    """
    (code, msg) = result
    if counter:
        counter = counter + value
    get_context().invoke_undo(code, msg, self.config_log)
    failed_stage = get_context().get_state()
    self.config_object_error(
        id, fq_name, obj_type, failed_stage, msg)
# end undo
# http_resource_<oper> - handlers invoked from
# a. bottle route (on-the-wire) OR
# b. internal requests
# using normalized get_request() from ApiContext
@log_api_stats
def http_resource_create(self, obj_type):
resource_type, r_class = self._validate_resource_type(obj_type)
obj_dict = get_request().json[resource_type]
# check visibility
user_visible = (obj_dict.get('id_perms') or {}).get('user_visible', True)
if not user_visible and not self.is_admin_request():
result = 'This object is not visible by users'
self.config_object_error(None, None, obj_type, 'http_post', result)
raise cfgm_common.exceptions.HttpError(400, result)
self._post_validate(obj_type, obj_dict=obj_dict)
fq_name = obj_dict['fq_name']
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_create an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# properties validator
ok, result = self._validate_props_in_request(r_class,
obj_dict, operation='CREATE')
if not ok:
result = 'Bad property in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
ok, result = self._validate_refs_in_request(r_class, obj_dict)
if not ok:
result = 'Bad reference in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# Can abort resource creation and retrun 202 status code
get_context().set_state('PENDING_DBE_CREATE')
ok, result = r_class.pending_dbe_create(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
if ok and isinstance(result, tuple) and result[0] == 202:
# Creation accepted but not applied, pending delete return 202 HTTP
# OK code to aware clients
pending_obj_dict = result[1]
bottle.response.status = 202
rsp_body = {}
rsp_body['fq_name'] = pending_obj_dict['fq_name']
rsp_body['uuid'] = pending_obj_dict['uuid']
rsp_body['name'] = pending_obj_dict['fq_name'][-1]
rsp_body['href'] = self.generate_url(resource_type,
pending_obj_dict['uuid'])
rsp_body['parent_type'] = pending_obj_dict['parent_type']
rsp_body['parent_uuid'] = pending_obj_dict['parent_uuid']
rsp_body['parent_href'] = self.generate_url(
pending_obj_dict['parent_type'],pending_obj_dict['parent_uuid'])
return {resource_type: rsp_body}
get_context().set_state('PRE_DBE_ALLOC')
# type-specific hook
ok, result = r_class.pre_dbe_alloc(obj_dict)
if not ok:
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# common handling for all resource create
(ok, result) = self._post_common(obj_type, obj_dict)
if not ok:
(code, msg) = result
fq_name_str = ':'.join(obj_dict.get('fq_name', []))
self.config_object_error(None, fq_name_str, obj_type, 'http_post', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
uuid_in_req = result
name = obj_dict['fq_name'][-1]
fq_name = obj_dict['fq_name']
db_conn = self._db_conn
# if client gave parent_type of config-root, ignore and remove
if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
del obj_dict['parent_type']
parent_class = None
if 'parent_type' in obj_dict:
# non config-root child, verify parent exists
parent_res_type, parent_class = self._validate_resource_type(
obj_dict['parent_type'])
parent_obj_type = parent_class.object_type
parent_res_type = parent_class.resource_type
parent_fq_name = obj_dict['fq_name'][:-1]
try:
parent_uuid = self._db_conn.fq_name_to_uuid(parent_obj_type,
parent_fq_name)
(ok, status) = self._permissions.check_perms_write(
get_request(), parent_uuid)
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
self._permissions.set_user_role(get_request(), obj_dict)
obj_dict['parent_uuid'] = parent_uuid
except NoIdError:
err_msg = 'Parent %s type %s does not exist' % (
pformat(parent_fq_name), parent_res_type)
fq_name_str = ':'.join(parent_fq_name)
self.config_object_error(None, fq_name_str, obj_type, 'http_post', err_msg)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Validate perms on references
try:
self._validate_perms_in_request(r_class, obj_type, obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
400, 'Unknown reference in resource create %s.' %(obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
quota_counter = []
def stateful_create():
get_context().set_state('DBE_ALLOC')
# Alloc and Store id-mappings before creating entry on pubsub store.
# Else a subscriber can ask for an id mapping before we have stored it
(ok, result) = db_conn.dbe_alloc(obj_type, obj_dict, uuid_in_req)
if not ok:
return (ok, result)
get_context().push_undo(db_conn.dbe_release, obj_type, fq_name)
obj_id = result
env = get_request().headers.environ
tenant_name = env.get(hdr_server_tenant()) or 'default-project'
get_context().set_state('PRE_DBE_CREATE')
# type-specific hook
(ok, result) = r_class.pre_dbe_create(
tenant_name, obj_dict, db_conn)
if not ok:
return (ok, result)
callable = getattr(r_class, 'http_post_collection_fail', None)
if callable:
cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
ok, quota_limit, proj_uuid = r_class.get_quota_for_resource(obj_type,
obj_dict, db_conn)
if not ok:
return ok, quota_limit
get_context().set_state('DBE_CREATE')
if quota_limit >= 0:
path = self._path_prefix + proj_uuid + "/" + obj_type
if not self.quota_counter.get(path):
# Init quota counter
path_prefix = self._path_prefix + proj_uuid
try:
QuotaHelper._zk_quota_counter_init(
path_prefix, {obj_type: quota_limit}, proj_uuid,
self._db_conn, self.quota_counter)
except NoIdError:
msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
return (False, (404, msg))
(ok, result) = QuotaHelper.verify_quota_and_create_resource(
db_conn, obj_dict, obj_type, obj_id,
quota_limit, self.quota_counter[path])
if not ok:
return (ok, result)
else:
# To be used for reverting back count when undo() is called
quota_counter.append(self.quota_counter[path])
else:
#normal execution
(ok, result) = db_conn.dbe_create(obj_type, obj_id, obj_dict)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_CREATE')
# type-specific hook
try:
ok, result = r_class.post_dbe_create(tenant_name, obj_dict, db_conn)
except Exception as e:
ok = False
msg = ("%s:%s post_dbe_create had an exception: %s\n%s" %
(obj_type, obj_id, str(e),
cfgm_common.utils.detailed_traceback()))
result = (None, msg)
if not ok:
# Create is done, log to system, no point in informing user
self.config_log(result[1], level=SandeshLevel.SYS_ERR)
return True, obj_id
# end stateful_create
try:
ok, result = stateful_create()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, err_msg)
if not ok:
fq_name_str = ':'.join(fq_name)
self.undo(result, obj_type, fq_name=fq_name_str,
counter=quota_counter, value=-1)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
# Initialize quota counter if resource is project
if resource_type == 'project' and 'quota' in obj_dict:
proj_id = obj_dict['uuid']
quota_dict = obj_dict.get('quota')
path_prefix = self._path_prefix + proj_id
if quota_dict:
try:
QuotaHelper._zk_quota_counter_init(path_prefix, quota_dict,
proj_id, db_conn, self.quota_counter)
except NoIdError:
err_msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
rsp_body = {}
rsp_body['name'] = name
rsp_body['fq_name'] = fq_name
rsp_body['uuid'] = result
rsp_body['href'] = self.generate_url(resource_type, result)
if parent_class:
# non config-root child, send back parent uuid/href
rsp_body['parent_type'] = obj_dict['parent_type']
rsp_body['parent_uuid'] = parent_uuid
rsp_body['parent_href'] = self.generate_url(parent_res_type,
parent_uuid)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_create an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
return {resource_type: rsp_body}
# end http_resource_create
@log_api_stats
def http_resource_read(self, obj_type, id):
    """Handle GET of a single resource.

    Validates the id/type, honours If-None-Match (ETag) conditional
    reads, reads the object from the DB with a query-driven field set,
    applies visibility/permission filtering and returns
    ``{resource_type: body}``.

    :param obj_type: internal object type name (e.g. 'virtual_network')
    :param id: UUID of the object to read
    :raises cfgm_common.exceptions.HttpError: 404 for unknown id /
        type mismatch / hidden object, or hook/DB error codes
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    try:
        # best-effort pre-read extension hook; failures deliberately ignored
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_read' %(obj_type), id)
    except Exception as e:
        pass
    etag = get_request().headers.get('If-None-Match')
    db_conn = self._db_conn
    try:
        # id must resolve and belong to the requested type
        req_obj_type = db_conn.uuid_to_obj_type(id)
        if req_obj_type != obj_type:
            raise cfgm_common.exceptions.HttpError(
                404, 'No %s object found for id %s' %(resource_type, id))
        fq_name = db_conn.uuid_to_fq_name(id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    # common handling for all resource get
    (ok, result) = self._get_common(get_request(), id)
    if not ok:
        (code, msg) = result
        self.config_object_error(
            id, None, obj_type, 'http_get', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)
    db_conn = self._db_conn
    if etag:
        # conditional read: short-circuit with 304 if client copy is current
        (ok, result) = db_conn.dbe_is_latest(id, etag.strip('"'))
        if not ok:
            # Not present in DB
            self.config_object_error(
                id, None, obj_type, 'http_get', result)
            raise cfgm_common.exceptions.HttpError(404, result)
        is_latest = result
        if is_latest:
            # send Not-Modified, caches use this for read optimization
            bottle.response.status = 304
            return
    # end if etag
    # Generate field list for db layer
    obj_fields = r_class.prop_fields | r_class.ref_fields
    if 'fields' in get_request().query:
        # caller picked an explicit field list
        obj_fields |= set(get_request().query.fields.split(','))
    else: # default props + children + refs + backrefs
        if 'exclude_back_refs' not in get_request().query:
            obj_fields |= r_class.backref_fields
        if 'exclude_children' not in get_request().query:
            obj_fields |= r_class.children_fields
    (ok, result) = r_class.pre_dbe_read(id, fq_name, db_conn)
    if not ok:
        (code, msg) = result
        raise cfgm_common.exceptions.HttpError(code, msg)
    try:
        (ok, result) = db_conn.dbe_read(obj_type, id,
            list(obj_fields), ret_readonly=True)
        if not ok:
            self.config_object_error(id, None, obj_type, 'http_get', result)
    except NoIdError as e:
        # Not present in DB
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        raise cfgm_common.exceptions.HttpError(500, result)
    # check visibility: non-visible objects look like 404 to non-admins
    if (not result['id_perms'].get('user_visible', True) and
        not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % id
        self.config_object_error(id, None, obj_type, 'http_get', result)
        raise cfgm_common.exceptions.HttpError(404, result)
    if not self.is_admin_request():
        # strip references the caller has no read permission on
        result = self.obj_view(resource_type, result)
    (ok, err_msg) = r_class.post_dbe_read(result, db_conn)
    if not ok:
        (code, msg) = err_msg
        raise cfgm_common.exceptions.HttpError(code, msg)
    rsp_body = {}
    rsp_body['uuid'] = id
    rsp_body['name'] = result['fq_name'][-1]
    if 'exclude_hrefs' not in get_request().query:
        result = self.generate_hrefs(resource_type, result)
    rsp_body.update(result)
    id_perms = result['id_perms']
    # ETag enables the If-None-Match conditional read handled above
    bottle.response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
    try:
        # best-effort post-read extension hook; failures deliberately ignored
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_read' %(obj_type), id, rsp_body)
    except Exception as e:
        pass
    return {resource_type: rsp_body}
# end http_resource_read
# filter object references based on permissions
def obj_view(self, resource_type, obj_dict):
    """Return a copy of *obj_dict* with link references the caller may
    not read removed.

    Reads the perms2 of every linked object in one bulk DB call, then
    keeps only the references whose target both exists and passes the
    caller's read-permission check.
    """
    filtered = dict(obj_dict)
    res_class = self.get_resource_class(resource_type)
    link_fields = res_class.obj_links & set(obj_dict.keys())
    linked_uuids = [ref['uuid']
                    for field in link_fields
                    for ref in list(obj_dict[field])]
    raw_objs = self._db_conn._object_db.object_raw_read(
        res_class.object_type, linked_uuids, ["perms2"])
    by_uuid = dict((raw['uuid'], raw) for raw in raw_objs)
    for field in link_fields:
        kept = []
        for ref in obj_dict[field]:
            ref_uuid = ref['uuid']
            if ref_uuid not in by_uuid:
                # target no longer readable from DB; drop the reference
                continue
            readable = self._permissions.check_perms_read(
                get_request(), ref_uuid, obj_dict=by_uuid[ref_uuid])[0]
            if readable == True:
                kept.append(ref)
        filtered[field] = kept
    return filtered
# end obj_view
@log_api_stats
def http_resource_update(self, obj_type, id):
    """Handle PUT of a single resource.

    Reads the current object from the DB, optionally redirects the
    update to a pending (draft) version of the resource, then runs the
    common update pipeline via ``_put_common``.

    :param obj_type: internal object type name
    :param id: UUID of the object to update
    :returns: ``{resource_type: {'uuid', 'href'}}`` or None on empty body
    :raises cfgm_common.exceptions.HttpError: on validation/DB errors
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    # Early return if there is no body or an empty body
    request = get_request()
    req_json = request.json
    if not req_json or not req_json[resource_type]:
        return
    obj_dict = get_request().json[resource_type]
    if 'perms2' in obj_dict:
        # a perms2 update must always carry the owner field
        if 'owner' not in obj_dict['perms2']:
            raise cfgm_common.exceptions.HttpError(400,
                'owner in perms2 must be present')
    fields = r_class.prop_fields | r_class.ref_fields
    try:
        ok, result = self._db_conn.dbe_read(obj_type, id, fields)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not ok:
        self.config_object_error(id, None, obj_type, 'http_resource_update',
                                 result[1])
        raise cfgm_common.exceptions.HttpError(result[0], result[1])
    db_obj_dict = result
    # Look if the resource have a pending version, if yes use it as resource
    # to update
    if hasattr(r_class, 'get_pending_resource'):
        ok, result = r_class.get_pending_resource(db_obj_dict, fields)
        if ok and isinstance(result, dict):
            # update the draft copy instead of the enforced one
            db_obj_dict = result
            id = obj_dict['uuid'] = db_obj_dict['uuid']
        if not ok and result[0] != 404:
            # a real error, not merely "no pending version exists"
            self.config_object_error(
                id, None, obj_type, 'http_resource_update', result[1])
            raise cfgm_common.exceptions.HttpError(result[0], result[1])
    if resource_type == 'project' and 'quota' in db_obj_dict:
        # remember the old quota so _put_common can reconcile counters
        old_quota_dict = db_obj_dict['quota']
    else:
        old_quota_dict = None
    self._put_common(
        'http_put', obj_type, id, db_obj_dict, req_obj_dict=obj_dict,
        quota_dict=old_quota_dict)
    rsp_body = {}
    rsp_body['uuid'] = id
    rsp_body['href'] = self.generate_url(resource_type, id)
    return {resource_type: rsp_body}
# end http_resource_update
@log_api_stats
def http_resource_delete(self, obj_type, id):
    """Handle DELETE of a single resource.

    Validates existence, visibility and permissions; allows the
    type-specific pending-delete hook to defer (202) or refuse (409)
    the deletion; refuses while non-default children or non-relaxed
    back-references still exist; then runs the stateful delete pipeline
    (pre/post hooks, quota bookkeeping) with undo on failure.

    :param obj_type: internal object type name
    :param id: UUID of the object to delete
    :raises cfgm_common.exceptions.HttpError: 404 unknown id, 409 when
        still referenced / children present, or hook/DB error codes
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    db_conn = self._db_conn
    # if obj doesn't exist return early
    try:
        req_obj_type = db_conn.uuid_to_obj_type(id)
        if req_obj_type != obj_type:
            raise cfgm_common.exceptions.HttpError(
                404, 'No %s object found for id %s' %(resource_type, id))
        _ = db_conn.uuid_to_fq_name(id)
    except NoIdError:
        raise cfgm_common.exceptions.HttpError(
            404, 'ID %s does not exist' %(id))
    try:
        # best-effort pre-delete extension hook
        self._extension_mgrs['resourceApi'].map_method(
            'pre_%s_delete' %(obj_type), id)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        err_msg = 'In pre_%s_delete an extension had error for %s' \
                  %(obj_type, id)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
    # read in obj from db (accepting error) to get details of it
    try:
        (read_ok, read_result) = db_conn.dbe_read(obj_type, id)
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(404, str(e))
    if not read_ok:
        self.config_object_error(
            id, None, obj_type, 'http_delete', read_result)
        # proceed down to delete the resource
    # check visibility: hidden objects look like 404 to non-admins
    if (not read_result['id_perms'].get('user_visible', True) and
        not self.is_admin_request()):
        result = 'This object is not visible by users: %s' % id
        self.config_object_error(id, None, obj_type, 'http_delete', result)
        raise cfgm_common.exceptions.HttpError(404, result)
    # common handling for all resource delete
    parent_uuid = read_result.get('parent_uuid')
    (ok, del_result) = self._delete_common(
        get_request(), obj_type, id, parent_uuid)
    if not ok:
        (code, msg) = del_result
        self.config_object_error(id, None, obj_type, 'http_delete', msg)
        raise cfgm_common.exceptions.HttpError(code, msg)
    # Permit abort resource deletion and return 202 status code
    get_context().set_state('PENDING_DBE_DELETE')
    ok, result = r_class.pending_dbe_delete(read_result)
    if (not ok and isinstance(result, tuple) and result[0] == 409 and
            isinstance(result[1], set)):
        # Found back reference to existing enforced or draft resource
        exist_hrefs = [self.generate_url(type, uuid)
                       for type, uuid in result[1]]
        msg = "Delete when resource still referred: %s" % exist_hrefs
        self.config_object_error(id, None, obj_type, 'http_delete', msg)
        raise cfgm_common.exceptions.HttpError(409, msg)
    elif ok and isinstance(result, tuple) and result[0] == 202:
        # Deletion accepted but not applied, pending delete
        # return 202 HTTP OK code to aware clients
        bottle.response.status = 202
        return
    elif not ok:
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)
    # fail if non-default children or non-derived backrefs exist
    for child_field in r_class.children_fields:
        child_type, is_derived = r_class.children_field_types[child_field]
        if is_derived:
            continue
        child_cls = self.get_resource_class(child_type)
        default_child_name = 'default-%s' %(
            child_cls(parent_type=obj_type).get_type())
        exist_hrefs = []
        for child in read_result.get(child_field, []):
            # default children and the security-draft holder don't block delete
            if child['to'][-1] in [default_child_name,
                                   POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
                continue
            exist_hrefs.append(
                self.generate_url(child_type, child['uuid']))
        if exist_hrefs:
            err_msg = 'Delete when children still present: %s' %(
                exist_hrefs)
            self.config_object_error(
                id, None, obj_type, 'http_delete', err_msg)
            raise cfgm_common.exceptions.HttpError(409, err_msg)
    # references explicitly relaxed for delete don't block deletion
    relaxed_refs = set(db_conn.dbe_get_relaxed_refs(id))
    for backref_field in r_class.backref_fields:
        backref_type, _, is_derived = \
            r_class.backref_field_types[backref_field]
        if is_derived:
            continue
        exist_hrefs = [self.generate_url(backref_type, backref['uuid'])
                       for backref in read_result.get(backref_field, [])
                       if backref['uuid'] not in relaxed_refs]
        if exist_hrefs:
            err_msg = 'Delete when resource still referred: %s' %(
                exist_hrefs)
            self.config_object_error(
                id, None, obj_type, 'http_delete', err_msg)
            raise cfgm_common.exceptions.HttpError(409, err_msg)
    # State modification starts from here. Ensure that cleanup is done for all state changes
    cleanup_on_failure = []
    quota_counter = []
    def stateful_delete():
        # Runs the actual delete under the request state machine; a
        # failure return from here is rolled back via self.undo() below.
        get_context().set_state('PRE_DBE_DELETE')
        proj_id = r_class.get_project_id_for_resource(read_result, obj_type,
                                                      db_conn)
        (ok, del_result) = r_class.pre_dbe_delete(
            id, read_result, db_conn)
        if not ok:
            return (ok, del_result)
        # Delete default children first
        for child_field in r_class.children_fields:
            child_type, is_derived = r_class.children_field_types[child_field]
            if is_derived:
                continue
            if child_field in self._GENERATE_DEFAULT_INSTANCE:
                self.delete_default_children(child_type, read_result)
        callable = getattr(r_class, 'http_delete_fail', None)
        if callable:
            cleanup_on_failure.append((callable, [id, read_result, db_conn]))
        get_context().set_state('DBE_DELETE')
        (ok, del_result) = db_conn.dbe_delete(obj_type, id, read_result)
        if not ok:
            return (ok, del_result)
        if proj_id:
            # decrement the per-project quota counter for this type
            (ok, proj_dict) = QuotaHelper.get_project_dict_for_quota(
                proj_id, db_conn)
            if not ok:
                return ok, proj_dict
            quota_limit = QuotaHelper.get_quota_limit(proj_dict, obj_type)
            path = self._path_prefix + proj_id + "/" + obj_type
            if quota_limit > 0:
                if self.quota_counter.get(path):
                    self.quota_counter[path] -= 1
                else:
                    # quota counter obj not initialized
                    # in this api-server, Init counter
                    path_prefix = self._path_prefix + proj_id
                    QuotaHelper._zk_quota_counter_init(
                        path_prefix, {obj_type : quota_limit},
                        proj_id, db_conn, self.quota_counter)
                    if db_conn._zk_db.quota_counter_exists(path):
                        self.quota_counter[path] -= 1
                quota_counter.append(self.quota_counter.get(path))
            elif self.quota_counter.get(path):
                # quota limit is modified to unlimited
                # delete counter object
                del self.quota_counter[path]
        # type-specific hook
        get_context().set_state('POST_DBE_DELETE')
        try:
            ok, result = r_class.post_dbe_delete(id, read_result, db_conn)
        except Exception as e:
            ok = False
            msg = ("%s:%s post_dbe_delete had an exception: %s\n%s" %
                   (obj_type, id, str(e),
                    cfgm_common.utils.detailed_traceback()))
            result = (None, msg)
        if not ok:
            # Delete is done, log to system, no point in informing user
            self.config_log(result[1], level=SandeshLevel.SYS_ERR)
        return (True, '')
    # end stateful_delete
    try:
        ok, result = stateful_delete()
    except NoIdError as e:
        raise cfgm_common.exceptions.HttpError(
            404, 'No %s object found for id %s' %(resource_type, id))
    except Exception as e:
        ok = False
        err_msg = cfgm_common.utils.detailed_traceback()
        result = (500, err_msg)
    if not ok:
        # roll back partial state (quota counter restored with value=1)
        self.undo(result, obj_type, id=id, counter=quota_counter, value=1)
        code, msg = result
        raise cfgm_common.exceptions.HttpError(code, msg)
    try:
        # best-effort post-delete extension hook
        self._extension_mgrs['resourceApi'].map_method(
            'post_%s_delete' %(obj_type), id, read_result)
    except RuntimeError:
        # lack of registered extension leads to RuntimeError
        pass
    except Exception as e:
        # NOTE(review): log text says 'pre_%s_delete' although this is
        # the post-delete hook — looks like a copy/paste slip; confirm
        # before changing the message.
        err_msg = 'In pre_%s_delete an extension had error for %s' \
                  %(obj_type, id)
        err_msg += cfgm_common.utils.detailed_traceback()
        self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end http_resource_delete
@log_api_stats
def http_resource_list(self, obj_type):
    """Handle GET of a resource collection.

    Builds the filter set from the query string — parent ids (by fq-name
    or uuid), back-ref ids, object uuids or fq-names, pagination, field
    selection, count/detail/shared/exclude_hrefs flags — and delegates
    to ``_list_collection``.

    :param obj_type: internal object type name
    :raises cfgm_common.exceptions.HttpError: 400 on malformed filters
    """
    resource_type, r_class = self._validate_resource_type(obj_type)
    db_conn = self._db_conn
    env = get_request().headers.environ
    parent_uuids = None
    back_ref_uuids = None
    obj_uuids = None
    pagination = {}
    if 'parent_fq_name_str' in get_request().query:
        # resolve the parent fq-name against every candidate parent type
        parent_uuids = []
        parent_fq_name = get_request().query.parent_fq_name_str.split(':')
        parent_types = r_class.parent_types
        if 'parent_type' in get_request().query:
            parent_types = [get_request().query.parent_type]
        for parent_type in parent_types:
            _, p_class = self._validate_resource_type(parent_type)
            try:
                parent_uuids.append(
                    self._db_conn.fq_name_to_uuid(p_class.object_type,
                                                  parent_fq_name),
                )
            except cfgm_common.exceptions.NoIdError:
                # fq-name does not exist under this parent type; skip it
                pass
    elif 'parent_id' in get_request().query:
        parent_uuids = get_request().query.parent_id.split(',')
    if 'back_ref_id' in get_request().query:
        back_ref_uuids = get_request().query.back_ref_id.split(',')
    if 'obj_uuids' in get_request().query:
        obj_uuids = get_request().query.obj_uuids.split(',')
    if 'fq_names' in get_request().query:
        # resolve fq-names to uuids; unresolvable names are ignored
        obj_fqn_strs = get_request().query.fq_names.split(',')
        obj_uuid = None
        for obj_fqn_str in obj_fqn_strs:
            try:
                obj_fqn = obj_fqn_str.split(':')
                obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, obj_fqn)
                if obj_uuids is None:
                    obj_uuids = []
                obj_uuids.append(obj_uuid)
            except cfgm_common.exceptions.NoIdError as e:
                pass
        if obj_uuids is None:
            # none of the given fq-names resolved -> empty result
            return {'%ss' %(resource_type): []}
    if 'page_marker' in get_request().query:
        pagination['marker'] = self._validate_page_marker(
            get_request().query['page_marker'])
    if 'page_limit' in get_request().query:
        pagination['limit'] = self._validate_page_limit(
            get_request().query['page_limit'])
    # common handling for all resource get: drop unreadable parents
    for parent_uuid in list(parent_uuids or []):
        (ok, result) = self._get_common(get_request(), parent_uuid)
        if not ok:
            parent_uuids.remove(parent_uuid)
    if obj_uuids is None and back_ref_uuids is None and parent_uuids == []:
        # caller filtered by parents but none were readable
        return {'%ss' %(resource_type): []}
    if 'count' in get_request().query:
        is_count = 'true' in get_request().query.count.lower()
    else:
        is_count = False
    if 'detail' in get_request().query:
        is_detail = 'true' in get_request().query.detail.lower()
    else:
        is_detail = False
    if 'fields' in get_request().query:
        req_fields = get_request().query.fields.split(',')
    else:
        req_fields = []
    if 'shared' in get_request().query:
        include_shared = 'true' in get_request().query.shared.lower()
    else:
        include_shared = False
    try:
        filters = utils.get_filters(get_request().query.filters)
    except Exception as e:
        raise cfgm_common.exceptions.HttpError(
            400, 'Invalid filter ' + get_request().query.filters)
    if 'exclude_hrefs' in get_request().query:
        exclude_hrefs = True
    else:
        exclude_hrefs = False
    return self._list_collection(obj_type, parent_uuids, back_ref_uuids,
                                 obj_uuids, is_count, is_detail, filters,
                                 req_fields, include_shared, exclude_hrefs,
                                 pagination)
# end http_resource_list
# internal_request_<oper> - handlers of internally generated requests
# that save-ctx, generate-ctx and restore-ctx
def internal_request_create(self, resource_type, obj_json):
    """Issue a resource create as an internal (cloud-admin) request.

    Saves the current API context, fabricates a bottle request that
    impersonates the 'contrail-api' user, dispatches the regular create
    handler and restores the original context afterwards.

    :returns: (True, create-response) on success; errors propagate.
    """
    object_type = self.get_resource_class(resource_type).object_type
    try:
        saved_context = get_context()
        outer_req = get_request()
        fake_env = {
            'PATH_INFO': '/%ss' % resource_type,
            'bottle.app': outer_req.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        payload = {resource_type: obj_json}
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, payload, None)
        set_context(context.ApiContext(internal_req=internal_req))
        resp = self.http_resource_create(object_type)
        return True, resp
    finally:
        set_context(saved_context)
# end internal_request_create
def internal_request_update(self, resource_type, obj_uuid, obj_json):
    """Issue a resource update as an internal (cloud-admin) request.

    Temporarily swaps in a fabricated request context impersonating the
    'contrail-api' user, runs the regular update handler, then restores
    the caller's context.

    :returns: (True, "") on success; errors propagate.
    """
    object_type = self.get_resource_class(resource_type).object_type
    try:
        saved_context = get_context()
        outer_req = get_request()
        fake_env = {
            'PATH_INFO': '/%ss' % resource_type,
            'bottle.app': outer_req.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        payload = {resource_type: obj_json}
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, payload, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.http_resource_update(object_type, obj_uuid)
        return True, ""
    finally:
        set_context(saved_context)
# end internal_request_update
def internal_request_delete(self, resource_type, obj_uuid):
    """Issue a resource delete as an internal (cloud-admin) request.

    Temporarily swaps in a fabricated request context impersonating the
    'contrail-api' user, runs the regular delete handler, then restores
    the caller's context.

    :returns: (True, "") on success; errors propagate.
    """
    object_type = self.get_resource_class(resource_type).object_type
    try:
        saved_context = get_context()
        outer_req = get_request()
        fake_env = {
            'PATH_INFO': '/%s/%s' % (resource_type, obj_uuid),
            'bottle.app': outer_req.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, None, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.http_resource_delete(object_type, obj_uuid)
        return True, ""
    finally:
        set_context(saved_context)
# end internal_request_delete
def internal_request_ref_update(self, res_type, obj_uuid, operation,
                                ref_res_type, ref_uuid=None,
                                ref_fq_name=None, attr=None,
                                relax_ref_for_delete=False):
    """Add/remove a reference between two objects as an internal request.

    Builds the same payload a client would POST to /ref-update, swaps in
    a fabricated cloud-admin request context, dispatches the handler and
    restores the caller's context.

    :returns: (True, "") on success; errors propagate.
    """
    ref_req = {
        'type': res_type,
        'uuid': obj_uuid,
        'operation': operation,
        'ref-type': ref_res_type,
        'ref-uuid': ref_uuid,
        'ref-fq-name': ref_fq_name,
        'attr': attr,
        'relax_ref_for_delete': relax_ref_for_delete,
    }
    try:
        saved_context = get_context()
        outer_req = get_request()
        fake_env = {
            'PATH_INFO': '/ref-update',
            'bottle.app': outer_req.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, ref_req, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.ref_update_http_post()
        return True, ""
    finally:
        set_context(saved_context)
# end internal_request_ref_update
def internal_request_prop_collection(self, obj_uuid, updates=None):
    """Apply property-collection updates as an internal request.

    Swaps in a fabricated cloud-admin request context carrying the
    update payload, dispatches prop_collection_http_post and restores
    the caller's context.

    :returns: (True, '') on success; errors propagate.
    """
    req_payload = {
        'uuid': obj_uuid,
        'updates': updates or [],
    }
    try:
        saved_context = get_context()
        outer_req = get_request()
        # NOTE(review): PATH_INFO says '/ref-update' although this
        # dispatches the prop-collection handler directly; the path
        # appears unused by the direct call — confirm before changing.
        fake_env = {
            'PATH_INFO': '/ref-update',
            'bottle.app': outer_req.environ['bottle.app'],
            'HTTP_X_USER': 'contrail-api',
            'HTTP_X_ROLE': self.cloud_admin_role,
        }
        base_req = bottle.BaseRequest(fake_env)
        internal_req = context.ApiInternalRequest(
            base_req.url, base_req.urlparts, base_req.environ,
            base_req.headers, req_payload, None)
        set_context(context.ApiContext(internal_req=internal_req))
        self.prop_collection_http_post()
        return True, ''
    finally:
        set_context(saved_context)
# end internal_request_prop_collection
def alloc_vn_id(self, name):
    """Allocate a virtual-network id for *name* via the zookeeper DB."""
    zk_db = self._db_conn._zk_db
    return zk_db.alloc_vn_id(name)
def alloc_tag_value_id(self, tag_type, name):
    """Allocate a tag-value id for (*tag_type*, *name*) via the zookeeper DB."""
    zk_db = self._db_conn._zk_db
    return zk_db.alloc_tag_value_id(tag_type, name)
def create_default_children(self, object_type, parent_obj):
    """Create provisioned default child objects of *parent_obj*, recursively.

    Only non-derived child types listed in
    ``self._GENERATE_DEFAULT_INSTANCE`` get a default instance; the
    creation then recurses down the type hierarchy.

    :param object_type: internal type name of the parent
    :param parent_obj: parent resource object (child objects are built
        with ``parent_obj=parent_obj``)
    :returns: (True, '') on success, (False, error) on first failure
    """
    childs = self.get_resource_class(object_type).children_field_types
    # Create a default child only if provisioned for
    child_types = {type for _, (type, derivate) in childs.items()
                   if (not derivate and
                       type in self._GENERATE_DEFAULT_INSTANCE)}
    if not child_types:
        return True, ''
    for child_type in child_types:
        child_cls = self.get_resource_class(child_type)
        child_obj_type = child_cls.object_type
        child_obj = child_cls(parent_obj=parent_obj)
        child_dict = child_obj.__dict__
        child_dict['id_perms'] = self._get_default_id_perms()
        child_dict['perms2'] = self._get_default_perms2()
        (ok, result) = self._db_conn.dbe_alloc(child_obj_type, child_dict)
        if not ok:
            return (ok, result)
        obj_id = result
        # For virtual networks, allocate an ID
        if child_obj_type == 'virtual_network':
            child_dict['virtual_network_network_id'] = self.alloc_vn_id(
                child_obj.get_fq_name_str())
        (ok, result) = self._db_conn.dbe_create(child_obj_type, obj_id,
                                                child_dict)
        if not ok:
            # DB Create failed, log and stop further child creation.
            err_msg = "DB Create failed creating %s" % child_type
            self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
            return (ok, result)
        # recurse down type hierarchy
        ok, result = self.create_default_children(child_obj_type,
                                                  child_obj)
        if not ok:
            return False, result
    return True, ''
# end create_default_children
def delete_default_children(self, resource_type, parent_dict):
    """Delete the auto-provisioned default child of each child type.

    Only child types listed in ``self._GENERATE_DEFAULT_INSTANCE`` are
    considered; for each, the first child named 'default-<type>' found
    in *parent_dict* is removed through the regular HTTP delete path.
    """
    res_class = self.get_resource_class(resource_type)
    for field in res_class.children_fields:
        child_type, is_derived = res_class.children_field_types[field]
        # Delete a default child only if provisioned for
        if child_type not in self._GENERATE_DEFAULT_INSTANCE:
            continue
        child_cls = self.get_resource_class(child_type)
        # locate the default child, then delete it
        wanted_name = 'default-%s' % child_type
        for child_info in parent_dict.get(field, []):
            if child_info['to'][-1] != wanted_name:
                continue
            self.http_resource_delete(child_type, child_info['uuid'])
            break
# end delete_default_children
@classmethod
def _generate_resource_crud_methods(cls, obj):
    """Attach per-type CRUD handler methods to *obj*.

    For every known object type, installs bound partials of the generic
    http_resource_* handlers under the conventional attribute names:
    <type>s_http_post, <type>_http_get, <type>_http_put,
    <type>_http_delete and <type>s_http_get (list).
    """
    handler_specs = (
        (obj.http_resource_create, '%ss_http_post'),
        (obj.http_resource_read, '%s_http_get'),
        (obj.http_resource_update, '%s_http_put'),
        (obj.http_resource_delete, '%s_http_delete'),
        (obj.http_resource_list, '%ss_http_get'),
    )
    for object_type, _ in all_resource_type_tuples:
        for generic_handler, attr_fmt in handler_specs:
            bound = functools.partial(generic_handler, object_type)
            functools.update_wrapper(bound, generic_handler)
            setattr(obj, attr_fmt % object_type, bound)
# end _generate_resource_crud_methods
@classmethod
def _generate_resource_crud_uri(cls, obj):
    """Register bottle routes for every resource type on *obj*.

    CRUD + list URIs of the form:
      obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get)
      obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put)
      obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete)
      obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post)
      obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get)
    """
    leaf_routes = (('GET', '%s_http_get'),
                   ('PUT', '%s_http_put'),
                   ('DELETE', '%s_http_delete'))
    collection_routes = (('POST', '%ss_http_post'),
                         ('GET', '%ss_http_get'))
    for object_type, resource_type in all_resource_type_tuples:
        # leaf resource
        for verb, attr_fmt in leaf_routes:
            obj.route('/%s/<id>' % resource_type, verb,
                      getattr(obj, attr_fmt % object_type))
        # collection of leaf
        for verb, attr_fmt in collection_routes:
            obj.route('/%ss' % resource_type, verb,
                      getattr(obj, attr_fmt % object_type))
# end _generate_resource_crud_uri
def __init__(self, args_str=None):
self._db_conn = None
self._resource_classes = {}
self._args = None
self._path_prefix = _DEFAULT_ZK_COUNTER_PATH_PREFIX
self.quota_counter = {}
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self.lock_path_prefix = '%s/%s' % (self._args.cluster_id,
_DEFAULT_ZK_LOCK_PATH_PREFIX)
self.security_lock_prefix = '%s/security' % self.lock_path_prefix
# set the max size of the api requests
bottle.BaseRequest.MEMFILE_MAX = self._args.max_request_size
# multi_tenancy is ignored if aaa_mode is configured by user
if self._args.aaa_mode is not None:
if self.aaa_mode not in AAA_MODE_VALID_VALUES:
self.aaa_mode = AAA_MODE_DEFAULT_VALUE
elif self._args.multi_tenancy is not None:
# MT configured by user - determine from aaa-mode
self.aaa_mode = "cloud-admin" if self._args.multi_tenancy else "no-auth"
else:
self.aaa_mode = "cloud-admin"
# set python logging level from logging_level cmdline arg
if not self._args.logging_conf:
logging.basicConfig(level = getattr(logging, self._args.logging_level))
self._base_url = "http://%s:%s" % (self._args.listen_ip_addr,
self._args.listen_port)
# Generate LinkObjects for all entities
links = []
# Link for root
links.append(LinkObject('root', self._base_url , '/config-root',
'config-root'))
for _, resource_type in all_resource_type_tuples:
link = LinkObject('collection',
self._base_url , '/%ss' %(resource_type),
'%s' %(resource_type))
links.append(link)
for _, resource_type in all_resource_type_tuples:
link = LinkObject('resource-base',
self._base_url , '/%s' %(resource_type),
'%s' %(resource_type))
links.append(link)
self._homepage_links = links
self._pipe_start_app = None
#GreenletProfiler.set_clock_type('wall')
self._profile_info = None
for act_res in _ACTION_RESOURCES:
link = LinkObject('action', self._base_url, act_res['uri'],
act_res['link_name'], act_res['method'])
self._homepage_links.append(link)
# Register for VN delete request. Disallow delete of system default VN
self.route('/virtual-network/<id>', 'DELETE', self.virtual_network_http_delete)
self.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
self._homepage_links.insert(
0, LinkObject('documentation', self._base_url,
'/documentation/index.html',
'documentation', 'GET'))
# APIs to reserve/free block of IP address from a VN/Subnet
self.route('/virtual-network/<id>/ip-alloc',
'POST', self.vn_ip_alloc_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-alloc',
'virtual-network-ip-alloc', 'POST'))
self.route('/virtual-network/<id>/ip-free',
'POST', self.vn_ip_free_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-free',
'virtual-network-ip-free', 'POST'))
# APIs to find out number of ip instances from given VN subnet
self.route('/virtual-network/<id>/subnet-ip-count',
'POST', self.vn_subnet_ip_count_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/subnet-ip-count',
'virtual-network-subnet-ip-count', 'POST'))
# Enable/Disable aaa mode
self.route('/aaa-mode', 'GET', self.aaa_mode_http_get)
self.route('/aaa-mode', 'PUT', self.aaa_mode_http_put)
# Set Tag actions
self.route('/set-tag', 'POST', self.set_tag)
self._homepage_links.append(
LinkObject('action', self._base_url, '/set-tag', 'set-tag',
'POST'))
# Commit or discard draft security policy
self.route('/security-policy-draft', 'POST',
self.security_policy_draft)
self._homepage_links.append(
LinkObject('action', self._base_url, '/security-policy-draft',
'security-policy-draft', 'POST'))
# randomize the collector list
self._random_collectors = self._args.collectors
self._chksum = "";
if self._args.collectors:
self._chksum = hashlib.md5(''.join(self._args.collectors)).hexdigest()
self._random_collectors = random.sample(self._args.collectors, \
len(self._args.collectors))
# sandesh init
self._sandesh = Sandesh()
# Reset the sandesh send rate limit value
if self._args.sandesh_send_rate_limit is not None:
SandeshSystem.set_sandesh_send_rate_limit(
self._args.sandesh_send_rate_limit)
module = Module.API_SERVER
module_name = ModuleNames[Module.API_SERVER]
node_type = Module2NodeType[module]
node_type_name = NodeTypeNames[node_type]
self.table = "ObjectConfigNode"
if self._args.worker_id:
instance_id = self._args.worker_id
else:
instance_id = INSTANCE_ID_DEFAULT
hostname = socket.gethostname()
self._sandesh.init_generator(module_name, hostname,
node_type_name, instance_id,
self._random_collectors,
'vnc_api_server_context',
int(self._args.http_server_port),
['cfgm_common', 'vnc_cfg_api_server.sandesh'],
logger_class=self._args.logger_class,
logger_config_file=self._args.logging_conf,
config=self._args.sandesh_config)
self._sandesh.trace_buffer_create(name="VncCfgTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="RestApiTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBRequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBUVERequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
size=1000)
VncGreenlet.register_sandesh_handler()
self._sandesh.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(self._sandesh, hostname, module_name,
instance_id,
staticmethod(ConnectionState.get_conn_state_cb),
NodeStatusUVE, NodeStatus, self.table)
# Address Management interface
addr_mgmt = vnc_addr_mgmt.AddrMgmt(self)
self._addr_mgmt = addr_mgmt
# DB interface initialization
if self._args.wipe_config:
self._db_connect(True)
else:
self._db_connect(self._args.reset_config)
self._db_init_entries()
# ZK quota counter initialization
(ok, project_list, _) = self._db_conn.dbe_list('project',
field_names=['quota'])
if not ok:
(code, err_msg) = project_list # status
raise cfgm_common.exceptions.HttpError(code, err_msg)
for project in project_list or []:
if project.get('quota'):
path_prefix = self._path_prefix + project['uuid']
try:
QuotaHelper._zk_quota_counter_init(
path_prefix, project['quota'], project['uuid'],
self._db_conn, self.quota_counter)
except NoIdError:
err_msg = "Error in initializing quota "\
"Internal error : Failed to read resource count"
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# API/Permissions check
# after db init (uses db_conn)
self._rbac = vnc_rbac.VncRbac(self, self._db_conn)
self._permissions = vnc_perms.VncPermissions(self, self._args)
if self.is_rbac_enabled():
self._create_default_rbac_rule()
if self.is_auth_needed():
self._generate_obj_view_links()
if os.path.exists('/usr/bin/contrail-version'):
cfgm_cpu_uve = ModuleCpuState()
cfgm_cpu_uve.name = socket.gethostname()
cfgm_cpu_uve.config_node_ip = self.get_server_ip()
command = "contrail-version contrail-config | grep 'contrail-config'"
version = os.popen(command).read()
_, rpm_version, build_num = version.split()
cfgm_cpu_uve.build_info = build_info + '"build-id" : "' + \
rpm_version + '", "build-number" : "' + \
build_num + '"}]}'
cpu_info_trace = ModuleCpuStateTrace(data=cfgm_cpu_uve, sandesh=self._sandesh)
cpu_info_trace.send(sandesh=self._sandesh)
self.re_uuid = re.compile('^[0-9A-F]{8}-?[0-9A-F]{4}-?4[0-9A-F]{3}-?[89AB][0-9A-F]{3}-?[0-9A-F]{12}$',
re.IGNORECASE)
# Load extensions
self._extension_mgrs = {}
self._load_extensions()
# Authn/z interface
if self._args.auth == 'keystone':
auth_svc = vnc_auth_keystone.AuthServiceKeystone(self, self._args)
else:
auth_svc = vnc_auth.AuthService(self, self._args)
self._pipe_start_app = auth_svc.get_middleware_app()
self._auth_svc = auth_svc
if int(self._args.worker_id) == 0:
try:
self._extension_mgrs['resync'].map(
self._resync_domains_projects)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# following allowed without authentication
self.white_list = [
'^/documentation', # allow all documentation
'^/$', # allow discovery
]
self._global_asn = None
# map of running job instances. Key is the pid and value is job
# instance info
self._job_mgr_running_instances = {}
# end __init__
@property
def global_autonomous_system(self):
if not self._global_asn:
gsc_class = self.get_resource_class(GlobalSystemConfig.object_type)
ok, result = gsc_class.locate(uuid=self._gsc_uuid, create_it=False,
fields=['autonomous_system'])
if not ok:
msg = ("Cannot fetch Global System Config to obtain "
"autonomous system")
raise cfgm_common.exceptions.VncError(msg)
self._global_asn = result['autonomous_system']
return self._global_asn
    @global_autonomous_system.setter
    def global_autonomous_system(self, asn):
        """Seed/override the cached global ASN (avoids a DB read on the
        next getter access)."""
        self._global_asn = asn
def _extensions_transform_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_request', request)
# end _extensions_transform_request
def _extensions_validate_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'validate_request', request)
# end _extensions_validate_request
def _extensions_transform_response(self, request, response):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_response', request, response)
# end _extensions_transform_response
@ignore_exceptions
def _generate_rest_api_request_trace(self):
method = get_request().method.upper()
if method == 'GET':
return None
req_id = get_request().headers.get('X-Request-Id',
'req-%s' %(str(uuid.uuid4())))
gevent.getcurrent().trace_request_id = req_id
url = get_request().url
if method == 'DELETE':
req_data = ''
else:
try:
req_data = json.dumps(get_request().json)
except Exception as e:
req_data = '%s: Invalid request body' %(e)
rest_trace = RestApiTrace(request_id=req_id)
rest_trace.url = url
rest_trace.method = method
rest_trace.request_data = req_data
# Also log keystone response time against this request id,
# before returning the trace message.
if ((get_context().get_keystone_response_time()) is not None):
response_time = get_context().get_keystone_response_time()
response_time_in_usec = ((response_time.days*24*60*60) +
(response_time.seconds*1000000) +
response_time.microseconds)
stats = VncApiLatencyStats(
operation_type='VALIDATE',
application='KEYSTONE',
response_time_in_usec=response_time_in_usec,
response_size=0,
identifier=req_id,
)
stats_log = VncApiLatencyStatsLog(node_name="issu-vm6", api_latency_stats=stats, sandesh=self._sandesh)
x=stats_log.send(sandesh=self._sandesh)
return rest_trace
# end _generate_rest_api_request_trace
@ignore_exceptions
def _generate_rest_api_response_trace(self, rest_trace, response):
if not rest_trace:
return
rest_trace.status = bottle.response.status
rest_trace.response_body = json.dumps(response)
rest_trace.trace_msg(name='RestApiTraceBuf', sandesh=self._sandesh)
# end _generate_rest_api_response_trace
    # Public Methods
    def route(self, uri, method, handler):
        """Register *handler* on the API bottle app for (uri, method).

        The handler is wrapped so every request flows through the
        extension hooks, request/response tracing and RBAC validation,
        and so any unhandled exception is logged with a full cgitb
        traceback before being re-raised.
        """
        @use_context
        def handler_trap_exception(*args, **kwargs):
            try:
                trace = None
                # extension hooks may rewrite and/or reject the raw request
                self._extensions_transform_request(get_request())
                self._extensions_validate_request(get_request())

                trace = self._generate_rest_api_request_trace()

                (ok, status) = self._rbac.validate_request(get_request())
                if not ok:
                    (code, err_msg) = status
                    raise cfgm_common.exceptions.HttpError(code, err_msg)
                response = handler(*args, **kwargs)
                self._generate_rest_api_response_trace(trace, response)

                self._extensions_transform_response(get_request(), response)

                return response
            except Exception as e:
                # flush the request trace so the failing call stays visible
                if trace:
                    trace.trace_msg(name='RestApiTraceBuf',
                                    sandesh=self._sandesh)
                # don't log details of cfgm_common.exceptions.HttpError i.e handled error cases
                if isinstance(e, cfgm_common.exceptions.HttpError):
                    bottle.abort(e.status_code, e.content)
                else:
                    string_buf = StringIO()
                    cgitb_hook(file=string_buf, format="text")
                    err_msg = string_buf.getvalue()
                    self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
                    raise

        self.api_bottle.route(uri, method, handler_trap_exception)
    # end route
    def get_args(self):
        """Return the parsed command-line/config arguments namespace."""
        return self._args
    # end get_args
def get_server_ip(self):
ip_list = []
for i in netifaces.interfaces():
try:
if netifaces.AF_INET in netifaces.ifaddresses(i):
addr = netifaces.ifaddresses(i)[netifaces.AF_INET][0][
'addr']
if addr != '127.0.0.1' and addr not in ip_list:
ip_list.append(addr)
except ValueError as e:
self.config_log("Skipping interface %s: %s" % (i, str(e)),
level=SandeshLevel.SYS_DEBUG)
return ip_list
# end get_server_ip
    def get_listen_ip(self):
        """Return the configured listen IP address for the API server."""
        return self._args.listen_ip_addr
    # end get_listen_ip
    def get_server_port(self):
        """Return the configured listen port for the API server."""
        return self._args.listen_port
    # end get_server_port
    def get_worker_id(self):
        """Return this API server worker's id as an int."""
        return int(self._args.worker_id)
    # end get_worker_id
    def get_pipe_start_app(self):
        """Return the WSGI middleware pipeline entry (auth wrapper) built
        during __init__."""
        return self._pipe_start_app
    # end get_pipe_start_app
    def get_rabbit_health_check_interval(self):
        """Return the rabbit health-check interval (seconds) as a float."""
        return float(self._args.rabbit_health_check_interval)
    # end get_rabbit_health_check_interval
def is_auth_disabled(self):
return self._args.auth is None or self._args.auth.lower() != 'keystone'
    def is_admin_request(self):
        """True when the current request may act with admin privileges:
        auth is not needed, the request is server-internal, or the caller
        carries the configured cloud-admin role."""
        if not self.is_auth_needed():
            return True

        if is_internal_request():
            return True

        env = bottle.request.headers.environ
        roles = []
        # keystone middleware may populate either of these header variants
        for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'):
            if field in env:
                roles.extend(env[field].split(','))
        return has_role(self.cloud_admin_role, roles)
    def get_auth_headers_from_token(self, request, token):
        """Resolve *token* into the auth headers keystone middleware would
        have set for it; returns an empty dict when auth is disabled."""
        if self.is_auth_disabled() or not self.is_auth_needed():
            return {}

        return self._auth_svc.get_auth_headers_from_token(request, token)
    # end get_auth_headers_from_token
def _generate_obj_view_links(self):
for object_type, resource_type in all_resource_type_tuples:
r_class = self.get_resource_class(resource_type)
r_class.obj_links = (r_class.ref_fields | r_class.backref_fields | r_class.children_fields)
    # Check for the system created VN. Disallow such VN delete
    def virtual_network_http_delete(self, id):
        """DELETE handler for virtual-network.

        Rejects deletion of the system-created ip-fabric and link-local
        networks with 409; otherwise delegates to the generated base
        handler.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_network':
                raise cfgm_common.exceptions.HttpError(
                    404, 'No virtual-network object found for id %s' %(id))
            vn_name = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            raise cfgm_common.exceptions.HttpError(
                404, 'ID %s does not exist' %(id))
        if (vn_name == cfgm_common.IP_FABRIC_VN_FQ_NAME or
            vn_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME):
            raise cfgm_common.exceptions.HttpError(
                409,
                'Can not delete system created default virtual-network '+id)
        super(VncApiServer, self).virtual_network_http_delete(id)
   # end
@use_context
def homepage_http_get(self):
json_body = {}
json_links = []
# strip trailing '/' in url
url = get_request().url[:-1]
url = url.replace('<script>', '<!--script>')
url = url.replace('</script>', '</script-->')
for link in self._homepage_links:
# strip trailing '/' in url
json_links.append(
{'link': link.to_dict(with_url=url)}
)
json_body = {"href": url, "links": json_links}
return json_body
# end homepage_http_get
    def documentation_http_get(self, filename):
        """Serve generated API documentation files, trying the Ubuntu
        package path first and falling back to the CentOS one.

        NOTE(review): the CentOS path spells 'contrial-config' -- looks
        like a typo carried over from packaging; confirm against the
        actual rpm layout before changing the string.
        """
        # ubuntu packaged path
        doc_root = '/usr/share/doc/contrail-config/doc/contrail-config/html/'
        if not os.path.exists(doc_root):
            # centos packaged path
            doc_root='/usr/share/doc/python-vnc_cfg_api_server/contrial-config/html/'

        return bottle.static_file(
                filename,
                root=doc_root)
    # end documentation_http_get
def obj_perms_http_get(self):
if self.is_auth_disabled() or not self.is_auth_needed():
result = {
'token_info': None,
'is_cloud_admin_role': False,
'is_global_read_only_role': False,
'permissions': 'RWX'
}
return result
obj_uuid = None
if 'uuid' in get_request().query:
obj_uuid = get_request().query.uuid
ok, result = self._auth_svc.validate_user_token()
if not ok:
code, msg = result
self.config_object_error(obj_uuid, None, None,
'obj_perms_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
token_info = result
# roles in result['token_info']['access']['user']['roles']
result = {'token_info': token_info}
# Handle v2 and v3 responses
roles_list = []
if 'access' in token_info:
roles_list = [roles['name'] for roles in
token_info['access']['user']['roles']]
elif 'token' in token_info:
roles_list = [roles['name'] for roles in
token_info['token']['roles']]
result['is_cloud_admin_role'] = has_role(self.cloud_admin_role,
roles_list)
result['is_global_read_only_role'] = has_role(
self.global_read_only_role, roles_list)
if obj_uuid:
result['permissions'] = self._permissions.obj_perms(get_request(),
obj_uuid)
if 'token' in token_info.keys():
if 'project' in token_info['token'].keys():
domain = None
try:
domain = token_info['token']['project']['domain']['id']
domain = str(uuid.UUID(domain))
except ValueError, TypeError:
if domain == 'default':
domain = 'default-domain'
domain = self._db_conn.fq_name_to_uuid('domain', [domain])
if domain:
domain = domain.replace('-', '')
token_info['token']['project']['domain']['id'] = domain
return result
# end obj_perms_http_get
def invalid_uuid(self, uuid):
return self.re_uuid.match(uuid) is None
def invalid_access(self, access):
return type(access) is not int or access not in range(0, 8)
    def invalid_share_type(self, share_type):
        """True when *share_type* is not one of the permitted perms2
        share kinds (see cfgm_common.PERMS2_VALID_SHARE_TYPES)."""
        return share_type not in cfgm_common.PERMS2_VALID_SHARE_TYPES
    # change ownership of an object
    def obj_chown_http_post(self):
        """POST handler for /obj-chown.

        Body: {'uuid': <object uuid>, 'owner': <new owner uuid>}.
        Requires RW permission on the object; rewrites perms2.owner and
        logs the change at NOTICE level.  Returns an empty dict.
        """
        obj_uuid = get_request().json.get('uuid')
        owner = get_request().json.get('owner')
        if obj_uuid is None:
            msg = "Bad Request, no resource UUID provided to chown"
            raise cfgm_common.exceptions.HttpError(400, msg)
        if owner is None:
            msg = "Bad Request, no owner UUID provided to chown"
            raise cfgm_common.exceptions.HttpError(400, msg)
        if self.invalid_uuid(obj_uuid):
            msg = "Bad Request, invalid resource UUID"
            raise cfgm_common.exceptions.HttpError(400, msg)
        if self.invalid_uuid(owner):
            msg = "Bad Request, invalid owner UUID"
            raise cfgm_common.exceptions.HttpError(400, msg)

        try:
            obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
        except NoIdError as e:
            # Not present in DB
            raise cfgm_common.exceptions.HttpError(404, str(e))

        self._ensure_services_conn('chown', obj_type, obj_uuid=obj_uuid)

        # ensure user has RW permissions to object
        perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if not 'RW' in perms:
            raise cfgm_common.exceptions.HttpError(403, " Permission denied")
        try:
            (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                                    obj_fields=['perms2'])
        except NoIdError as e:
            raise cfgm_common.exceptions.HttpError(404, str(e))
        obj_dict['perms2']['owner'] = owner
        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)

        msg = "chown: %s owner set to %s" % (obj_uuid, owner)
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

        return {}
    #end obj_chown_http_post
def dump_cache(self):
self._post_common(None, {})
req_dict = get_request().json or {}
obj_uuids = req_dict.get('uuids', [])
count = req_dict.get('count', 10)
return self._db_conn._object_db._obj_cache_mgr.dump_cache(
obj_uuids=obj_uuids, count=count)
    # chmod for an object
    def obj_chmod_http_post(self):
        """POST handler for /obj-chmod.

        Body: {'uuid': ..., 'owner': ..., 'owner_access': ...,
               'global_access': ..., 'share': [...]} -- every field but
        'uuid' optional.  Requires RW permission on the object; validates
        each supplied field, updates perms2 (and is_shared when
        global_access changes) and logs old/new perms at NOTICE level.
        """
        try:
            obj_uuid = get_request().json['uuid']
        except Exception as e:
            raise cfgm_common.exceptions.HttpError(400, str(e))
        if self.invalid_uuid(obj_uuid):
            raise cfgm_common.exceptions.HttpError(
                400, "Bad Request, invalid object id")

        try:
            obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
        except NoIdError as e:
            # Not present in DB
            raise cfgm_common.exceptions.HttpError(404, str(e))

        self._ensure_services_conn('chmod', obj_type, obj_uuid=obj_uuid)

        # ensure user has RW permissions to object
        perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if not 'RW' in perms:
            raise cfgm_common.exceptions.HttpError(403, " Permission denied")

        request_params = get_request().json
        owner = request_params.get('owner')
        share = request_params.get('share')
        owner_access = request_params.get('owner_access')
        global_access = request_params.get('global_access')

        (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                             obj_fields=['perms2', 'is_shared'])
        obj_perms = obj_dict['perms2']
        # snapshot for the audit log below
        old_perms = '%s/%d %d %s' % (obj_perms['owner'],
            obj_perms['owner_access'], obj_perms['global_access'],
            ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])

        if owner:
            if self.invalid_uuid(owner):
                raise cfgm_common.exceptions.HttpError(
                    400, "Bad Request, invalid owner")
            obj_perms['owner'] = owner.replace('-','')
        if owner_access is not None:
            if self.invalid_access(owner_access):
                raise cfgm_common.exceptions.HttpError(
                    400, "Bad Request, invalid owner_access value")
            obj_perms['owner_access'] = owner_access
        if share is not None:
            try:
                for item in share:
                    """
                    item['tenant'] := [<share_type>:] <uuid>
                    share_type := ['domain' | 'tenant']
                    """
                    (share_type, share_id) = cfgm_common.utils.shareinfo_from_perms2_tenant(item['tenant'])
                    # NOTE: an HttpError raised here is re-caught by the
                    # enclosing except and re-raised with its str() as body
                    if self.invalid_share_type(share_type) or self.invalid_uuid(share_id) or self.invalid_access(item['tenant_access']):
                        raise cfgm_common.exceptions.HttpError(
                            400, "Bad Request, invalid share list")
            except Exception as e:
                raise cfgm_common.exceptions.HttpError(400, str(e))
            obj_perms['share'] = share
        if global_access is not None:
            if self.invalid_access(global_access):
                raise cfgm_common.exceptions.HttpError(
                    400, "Bad Request, invalid global_access value")
            obj_perms['global_access'] = global_access
            obj_dict['is_shared'] = (global_access != 0)

        new_perms = '%s/%d %d %s' % (obj_perms['owner'],
            obj_perms['owner_access'], obj_perms['global_access'],
            ['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])

        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)

        msg = "chmod: %s perms old=%s, new=%s" % (obj_uuid, old_perms, new_perms)
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

        return {}
    # end obj_chmod_http_post
def prop_collection_http_get(self):
if 'uuid' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object uuid needed for property collection get')
obj_uuid = get_request().query.uuid
if 'fields' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object fields needed for property collection get')
obj_fields = get_request().query.fields.split(',')
if 'position' in get_request().query:
fields_position = get_request().query.position
else:
fields_position = None
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
resource_class = self.get_resource_class(obj_type)
for obj_field in obj_fields:
if ((obj_field not in resource_class.prop_list_fields) and
(obj_field not in resource_class.prop_map_fields)):
err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# request validations over
# common handling for all resource get
(ok, result) = self._get_common(get_request(), obj_uuid)
if not ok:
(code, msg) = result
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
try:
ok, result = self._db_conn.prop_collection_get(
obj_type, obj_uuid, obj_fields, fields_position)
if not ok:
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', result)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
# check visibility
if (not result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(
id, None, None, 'prop_collection_http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
# Prepare response
del result['id_perms']
return result
# end prop_collection_http_get
def prop_collection_http_post(self):
request_params = get_request().json
# validate each requested operation
obj_uuid = request_params.get('uuid')
if not obj_uuid:
err_msg = 'Error: prop_collection_update needs obj_uuid'
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
r_class = self.get_resource_class(obj_type)
for req_param in request_params.get('updates') or []:
obj_field = req_param.get('field')
if obj_field in r_class.prop_list_fields:
prop_coll_type = 'list'
elif obj_field in r_class.prop_map_fields:
prop_coll_type = 'map'
else:
err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
req_oper = req_param.get('operation').lower()
field_val = req_param.get('value')
field_pos = str(req_param.get('position'))
prop_type = r_class.prop_field_types[obj_field]['xsd_type']
prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
prop_val_type = prop_cls.attr_field_type_vals[prop_cls.attr_fields[0]]['attr_type']
prop_val_cls = cfgm_common.utils.str_to_class(prop_val_type, __name__)
try:
self._validate_complex_type(prop_val_cls, field_val)
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if prop_coll_type == 'list':
if req_oper not in ('add', 'modify', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'add') and field_val is None):
err_msg = 'Add needs field value in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'modify') and
None in (field_val, field_pos)):
err_msg = 'Modify needs field value and position in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'delete') and field_pos is None):
err_msg = 'Delete needs field position in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif prop_coll_type == 'map':
if req_oper not in ('set', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'set') and field_val is None):
err_msg = 'Set needs field value in request %s' %(
req_oper, json.dumps(req_param))
elif ((req_oper == 'delete') and field_pos is None):
err_msg = 'Delete needs field position in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Get actual resource from DB
fields = r_class.prop_fields | r_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, obj_uuid,
obj_fields=fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
# Look if the resource have a pending version, if yes use it as resource
# to update
if hasattr(r_class, 'get_pending_resource'):
ok, result = r_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
obj_uuid = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(obj_uuid, None, obj_type,
'prop_collection_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
self._put_common('prop-collection-update', obj_type, obj_uuid,
db_obj_dict,
req_prop_coll_updates=request_params.get('updates'))
# end prop_collection_http_post
def ref_update_http_post(self):
# grab fields
type = get_request().json.get('type')
res_type, res_class = self._validate_resource_type(type)
obj_uuid = get_request().json.get('uuid')
ref_type = get_request().json.get('ref-type')
ref_field = '%s_refs' %(ref_type.replace('-', '_'))
ref_res_type, ref_class = self._validate_resource_type(ref_type)
operation = get_request().json.get('operation')
ref_uuid = get_request().json.get('ref-uuid')
ref_fq_name = get_request().json.get('ref-fq-name')
attr = get_request().json.get('attr')
relax_ref_for_delete = get_request().json.get('relax_ref_for_delete', False)
# validate fields
if None in (res_type, obj_uuid, ref_res_type, operation):
err_msg = 'Bad Request: type/uuid/ref-type/operation is null: '
err_msg += '%s, %s, %s, %s.' \
%(res_type, obj_uuid, ref_res_type, operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
operation = operation.upper()
if operation not in ['ADD', 'DELETE']:
err_msg = 'Bad Request: operation should be add or delete: %s' \
%(operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
if not ref_uuid and not ref_fq_name:
err_msg = 'Bad Request: ref-uuid or ref-fq-name must be specified'
raise cfgm_common.exceptions.HttpError(400, err_msg)
obj_type = res_class.object_type
ref_obj_type = ref_class.object_type
if not ref_uuid:
try:
ref_uuid = self._db_conn.fq_name_to_uuid(ref_obj_type, ref_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(ref_fq_name) + ' not found')
elif operation == 'ADD':
# if UUID provided verify existence of the reference being added
try:
ref_fq_name = self._db_conn.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
# To invoke type specific hook and extension manager
fields = res_class.prop_fields | res_class.ref_fields
try:
ok, result = self._db_conn.dbe_read(obj_type, obj_uuid, fields)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
self.config_object_error(obj_uuid, None, obj_type, 'ref_update',
result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
db_obj_dict = result
# Look if the resource have a pending version, if yes use it as resource
# to update
if hasattr(res_class, 'get_pending_resource'):
ok, result = res_class.get_pending_resource(db_obj_dict, fields)
if ok and isinstance(result, dict):
db_obj_dict = result
obj_uuid = db_obj_dict['uuid']
if not ok and result[0] != 404:
self.config_object_error(
obj_uuid, None, obj_type, 'ref_update', result[1])
raise cfgm_common.exceptions.HttpError(result[0], result[1])
obj_dict = {'uuid': obj_uuid}
if ref_field in db_obj_dict:
obj_dict[ref_field] = copy.deepcopy(db_obj_dict[ref_field])
if operation == 'ADD':
if ref_obj_type+'_refs' not in obj_dict:
obj_dict[ref_obj_type+'_refs'] = []
existing_ref = [ref for ref in obj_dict[ref_obj_type+'_refs']
if ref['uuid'] == ref_uuid]
if existing_ref:
ref['attr'] = attr
else:
obj_dict[ref_obj_type+'_refs'].append(
{'to':ref_fq_name, 'uuid': ref_uuid, 'attr':attr})
elif operation == 'DELETE':
for old_ref in obj_dict.get(ref_obj_type+'_refs', []):
if old_ref['to'] == ref_fq_name or old_ref['uuid'] == ref_uuid:
obj_dict[ref_obj_type+'_refs'].remove(old_ref)
break
ref_args = {'ref_obj_type':ref_obj_type, 'ref_uuid': ref_uuid,
'operation': operation, 'data': {'attr': attr},
'relax_ref_for_delete': relax_ref_for_delete}
self._put_common('ref-update', obj_type, obj_uuid, db_obj_dict,
req_obj_dict=obj_dict, ref_args=ref_args)
return {'uuid': obj_uuid}
# end ref_update_http_post
def ref_relax_for_delete_http_post(self):
self._post_common(None, {})
# grab fields
obj_uuid = get_request().json.get('uuid')
ref_uuid = get_request().json.get('ref-uuid')
# validate fields
if None in (obj_uuid, ref_uuid):
err_msg = 'Bad Request: Both uuid and ref-uuid should be specified: '
err_msg += '%s, %s.' %(obj_uuid, ref_uuid)
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
self._db_conn.ref_relax_for_delete(obj_uuid, ref_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'uuid ' + obj_uuid + ' not found')
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'ref-relax-for-delete'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return {'uuid': obj_uuid}
# end ref_relax_for_delete_http_post
    def fq_name_to_id_http_post(self):
        """POST handler for /fq-name-to-id: translate {'type', 'fq_name'}
        into the object's UUID, enforcing read permission on the result.

        For projects not yet present in the DB, the 'resourceApi'
        extension hook is given a chance to create/sync the project
        (e.g. from keystone) before retrying the lookup.
        """
        self._post_common(None, {})
        type = get_request().json.get('type')
        res_type, r_class = self._validate_resource_type(type)
        obj_type = r_class.object_type
        fq_name = get_request().json['fq_name']

        try:
            id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        except NoIdError:
            if obj_type == 'project':
                resource_type, r_class = self._validate_resource_type(obj_type)
                try:
                    # allow the extension to materialize the project, then
                    # retry the lookup once
                    self._extension_mgrs['resourceApi'].map_method(
                        'pre_%s_read_fqname' %(obj_type), fq_name)
                    id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
                except Exception as e:
                    raise cfgm_common.exceptions.HttpError(
                        404, 'Name ' + pformat(fq_name) + ' not found')
            else:
                raise cfgm_common.exceptions.HttpError(
                    404, 'Name ' + pformat(fq_name) + ' not found')

        # ensure user has access to this id
        ok, result = self._permissions.check_perms_read(bottle.request, id)
        if not ok:
            err_code, err_msg = result
            raise cfgm_common.exceptions.HttpError(err_code, err_msg)

        return {'uuid': id}
    # end fq_name_to_id_http_post
def id_to_fq_name_http_post(self):
self._post_common(None, {})
obj_uuid = get_request().json['uuid']
# ensure user has access to this id
ok, result = self._permissions.check_perms_read(get_request(), obj_uuid)
if not ok:
err_code, err_msg = result
raise cfgm_common.exceptions.HttpError(err_code, err_msg)
try:
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'UUID ' + obj_uuid + ' not found')
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
res_type = self.get_resource_class(obj_type).resource_type
return {'fq_name': fq_name, 'type': res_type}
# end id_to_fq_name_http_post
    # Enables a user-agent to store and retrieve key-val pair
    # TODO this should be done only for special/quantum plugin
    def useragent_kv_http_post(self):
        """POST handler for the user-agent key/value store.

        Body: {'operation': STORE|RETRIEVE|DELETE, 'key': ...,
               'value': ...}.  RETRIEVE returns {'value': ...}; STORE and
        DELETE return nothing (HTTP body is empty).
        """
        self._post_common(None, {})

        request_params = get_request().json
        oper = request_params.get('operation')
        if oper is None:
            err_msg = ("Error: Key/value store API needs 'operation' "
                       "parameter")
            raise cfgm_common.exceptions.HttpError(400, err_msg)
        if 'key' not in request_params:
            err_msg = ("Error: Key/value store API needs 'key' parameter")
            raise cfgm_common.exceptions.HttpError(400, err_msg)
        key = request_params.get('key')
        val = request_params.get('value', '')

        # TODO move values to common
        if oper == 'STORE':
            self._db_conn.useragent_kv_store(key, val)
        elif oper == 'RETRIEVE':
            try:
                result = self._db_conn.useragent_kv_retrieve(key)
                return {'value': result}
            except NoUserAgentKey:
                raise cfgm_common.exceptions.HttpError(
                    404, "Unknown User-Agent key " + key)
        elif oper == 'DELETE':
            result = self._db_conn.useragent_kv_delete(key)
        else:
            raise cfgm_common.exceptions.HttpError(
                404, "Invalid Operation " + oper)
    # end useragent_kv_http_post
def db_check(self):
""" Check database for inconsistencies. No update to database """
check_result = self._db_conn.db_check()
return {'results': check_result}
# end db_check
def fetch_records(self):
""" Retrieve and return all records """
result = self._db_conn.db_read()
return {'results': result}
# end fetch_records
    def start_profile(self):
        """Begin greenlet profiling; currently disabled (no-op) -- the
        GreenletProfiler call is intentionally commented out."""
        #GreenletProfiler.start()
        pass
    # end start_profile
    def stop_profile(self):
        """Stop greenlet profiling and capture stats; currently disabled
        (no-op) -- the GreenletProfiler calls are intentionally
        commented out."""
        pass
        #GreenletProfiler.stop()
        #stats = GreenletProfiler.get_func_stats()
        #self._profile_info = stats.print_all()

        #return self._profile_info
    # end stop_profile
    def get_profile_info(self):
        """Return the last captured profiling report (see stop_profile)."""
        return self._profile_info
    # end get_profile_info
    def get_resource_class(self, type_str):
        """Return (and cache) the server-side resource class for a type.

        Accepts either an object_type ('virtual_network') or a
        resource_type ('virtual-network'). Looks for a hand-written
        <Type>Server class in vnc_cfg_types; when absent, synthesizes a
        placeholder class derived from (Resource, <Type>) so CRUD code
        can always call resource_class methods.

        :raises TypeError: when type_str maps to no known generated type.
        """
        if type_str in self._resource_classes:
            return self._resource_classes[type_str]
        common_name = cfgm_common.utils.CamelCase(type_str)
        server_name = '%sServer' % common_name
        try:
            resource_class = getattr(vnc_cfg_types, server_name)
        except AttributeError:
            common_class = cfgm_common.utils.str_to_class(common_name,
                                                          __name__)
            if common_class is None:
                raise TypeError('Invalid type: ' + type_str)
            # Create Placeholder classes derived from Resource, <Type> so
            # resource_class methods can be invoked in CRUD methods without
            # checking for None
            resource_class = type(
                str(server_name),
                (vnc_cfg_types.Resource, common_class, object),
                {})
        resource_class.server = self
        # cache under both spellings so either lookup key hits next time
        self._resource_classes[resource_class.object_type] = resource_class
        self._resource_classes[resource_class.resource_type] = resource_class
        return resource_class
    # end get_resource_class
    def list_bulk_collection_http_post(self):
        """ List collection when requested ids don't fit in query params.

        POST variant of resource listing: all the usual list query
        parameters (parent_id, back_ref_id, obj_uuids, count, detail,
        shared, filters, fields, exclude_hrefs, page_marker, page_limit)
        are taken from the JSON body instead of the URL.
        """
        # NOTE: 'type' shadows the builtin; kept for compatibility.
        type = get_request().json.get('type') # e.g. virtual-network
        resource_type, r_class = self._validate_resource_type(type)
        try:
            parent_uuids = get_request().json['parent_id'].split(',')
        except KeyError:
            parent_uuids = None
        try:
            back_ref_uuids = get_request().json['back_ref_id'].split(',')
        except KeyError:
            back_ref_uuids = None
        try:
            obj_uuids = get_request().json['obj_uuids'].split(',')
        except KeyError:
            obj_uuids = None
        is_count = get_request().json.get('count', False)
        is_detail = get_request().json.get('detail', False)
        include_shared = get_request().json.get('shared', False)
        try:
            filters = utils.get_filters(get_request().json.get('filters'))
        except Exception as e:
            # NOTE(review): the '+' below assumes 'filters' is a string;
            # a non-string value would raise TypeError here — confirm.
            raise cfgm_common.exceptions.HttpError(
                400, 'Invalid filter ' + get_request().json.get('filters'))
        req_fields = get_request().json.get('fields', [])
        if req_fields:
            req_fields = req_fields.split(',')
        exclude_hrefs = get_request().json.get('exclude_hrefs', False)
        pagination = {}
        if 'page_marker' in get_request().json:
            pagination['marker'] = self._validate_page_marker(
                get_request().json['page_marker'])
        if 'page_limit' in get_request().json:
            pagination['limit'] = self._validate_page_limit(
                get_request().json['page_limit'])
        return self._list_collection(r_class.object_type, parent_uuids,
                                     back_ref_uuids, obj_uuids, is_count,
                                     is_detail, filters, req_fields,
                                     include_shared, exclude_hrefs,
                                     pagination)
    # end list_bulk_collection_http_post
# Private Methods
    def _parse_args(self, args_str):
        '''
        Parse command-line / config-file arguments into self._args.

        Eg. python vnc_cfg_api_server.py --cassandra_server_list
                                         10.1.2.3:9160 10.1.2.4:9160
                                         --redis_server_ip 127.0.0.1
                                         --redis_server_port 6382
                                         --collectors 127.0.0.1:8086
                                         --http_server_port 8090
                                         --listen_ip_addr 127.0.0.1
                                         --listen_port 8082
                                         --admin_port 8095
                                         --region_name RegionOne
                                         --log_local
                                         --log_level SYS_DEBUG
                                         --logging_level DEBUG
                                         --logging_conf <logger-conf-file>
                                         --log_category test
                                         --log_file <stdout>
                                         --trace_file /var/log/contrail/vnc_openstack.err
                                         --use_syslog
                                         --syslog_facility LOG_USER
                                         --worker_id 1
                                         --rabbit_max_pending_updates 4096
                                         --rabbit_health_check_interval 120.0
                                         --cluster_id <testbed-name>
                                         [--auth keystone]
                                         [--default_encoding ascii ]
                                         --object_cache_size 10000
                                         --object_cache_exclude_types ''
                                         --max_request_size 1024000
        '''
        self._args, _ = utils.parse_args(args_str)
    # end _parse_args
# sigchld handler is currently not engaged. See comment @sigchld
    def sigchld_handler(self):
        """Re-establish the DB connection and re-seed default entries.

        Intended as a SIGCHLD handler but currently not registered (see
        the comment preceding this method).
        """
        # DB interface initialization
        self._db_connect(reset_config=False)
        self._db_init_entries()
    # end sigchld_handler
def sigterm_handler(self):
exit()
# sighup handler for applying new configs
    def sighup_handler(self):
        """Handle SIGHUP: re-read the conf file and, when the collector
        list changed, reconnect sandesh to a reshuffled collector list.

        The md5 of the joined collector list is used as a cheap change
        detector; shuffling before reconnect spreads API servers across
        collectors for load balance.
        """
        if self._args.conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read(self._args.conf_file)
            if 'DEFAULTS' in config.sections():
                try:
                    collectors = config.get('DEFAULTS', 'collectors')
                    if type(collectors) is str:
                        collectors = collectors.split()
                        new_chksum = hashlib.md5("".join(collectors)).hexdigest()
                        if new_chksum != self._chksum:
                            self._chksum = new_chksum
                            self._random_collectors = random.sample(collectors, len(collectors))
                        # Reconnect to achieve load-balance irrespective of list
                        self._sandesh.reconfig_collectors(self._random_collectors)
                except ConfigParser.NoOptionError as e:
                    # no 'collectors' option in DEFAULTS: nothing to do
                    pass
    # end sighup_handler
    def _load_extensions(self):
        """Load the stevedore extension managers (resync, resourceApi,
        neutronApi) into self._extension_mgrs.

        The 'resync' extension is only loaded when authentication is
        enabled. Any load failure is logged (with traceback) rather than
        propagated, so the server still starts without extensions.
        """
        try:
            conf_sections = self._args.config_sections
            if self._args.auth != 'no-auth':
                self._extension_mgrs['resync'] = ExtensionManager(
                    'vnc_cfg_api.resync', api_server_ip=self._args.listen_ip_addr,
                    api_server_port=self._args.listen_port,
                    conf_sections=conf_sections, sandesh=self._sandesh)
            self._extension_mgrs['resourceApi'] = ExtensionManager(
                'vnc_cfg_api.resourceApi',
                propagate_map_exceptions=True,
                api_server_ip=self._args.listen_ip_addr,
                api_server_port=self._args.listen_port,
                conf_sections=conf_sections, sandesh=self._sandesh)
            self._extension_mgrs['neutronApi'] = ExtensionManager(
                'vnc_cfg_api.neutronApi',
                api_server_ip=self._args.listen_ip_addr,
                api_server_port=self._args.listen_port,
                conf_sections=conf_sections, sandesh=self._sandesh,
                api_server_obj=self)
        except Exception as e:
            err_msg = cfgm_common.utils.detailed_traceback()
            self.config_log("Exception in extension load: %s" %(err_msg),
                level=SandeshLevel.SYS_ERR)
    # end _load_extensions
    def _db_connect(self, reset_config):
        """Build self._db_conn (VncDbClient) from the parsed arguments.

        :param reset_config: forwarded to VncDbClient; controls whether
            existing DB artifacts are recreated.

        Object-cache exclude/debug type lists are comma-separated
        resource-type strings, normalized here to object_type form
        ('-' -> '_').
        """
        cass_server_list = self._args.cassandra_server_list
        # NOTE(review): the two redis values below are read but never
        # used in this method — confirm they can be dropped.
        redis_server_ip = self._args.redis_server_ip
        redis_server_port = self._args.redis_server_port
        zk_server = self._args.zk_server_ip
        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode
        cassandra_user = self._args.cassandra_user
        cassandra_password = self._args.cassandra_password
        cassandra_use_ssl = self._args.cassandra_use_ssl
        cassandra_ca_certs = self._args.cassandra_ca_certs
        obj_cache_entries = int(self._args.object_cache_entries)
        obj_cache_exclude_types = \
            [t.replace('-', '_').strip() for t in
             self._args.object_cache_exclude_types.split(',')]
        debug_obj_cache_types = \
            [t.replace('-', '_').strip() for t in
             self._args.debug_object_cache_types.split(',')]
        db_engine = self._args.db_engine
        self._db_engine = db_engine
        cred = None
        db_server_list = None
        if db_engine == 'cassandra':
            if cassandra_user is not None and cassandra_password is not None:
                # NOTE(review): password value looks redacted in this
                # copy of the source — confirm against upstream.
                cred = {'username':cassandra_user,'password':<PASSWORD>}
            db_server_list = cass_server_list
        self._db_conn = VncDbClient(
            self, db_server_list, rabbit_servers, rabbit_port, rabbit_user,
            rabbit_password, rabbit_vhost, rabbit_ha_mode, reset_config,
            zk_server, self._args.cluster_id, db_credential=cred,
            db_engine=db_engine, rabbit_use_ssl=self._args.rabbit_use_ssl,
            kombu_ssl_version=self._args.kombu_ssl_version,
            kombu_ssl_keyfile= self._args.kombu_ssl_keyfile,
            kombu_ssl_certfile=self._args.kombu_ssl_certfile,
            kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs,
            obj_cache_entries=obj_cache_entries,
            obj_cache_exclude_types=obj_cache_exclude_types,
            debug_obj_cache_types=debug_obj_cache_types,
            cassandra_use_ssl=self._args.cassandra_use_ssl,
            cassandra_ca_certs=self._args.cassandra_ca_certs)
        #TODO refacter db connection management.
        self._addr_mgmt._get_db_conn()
    # end _db_connect
    def _ensure_id_perms_present(self, obj_uuid, obj_dict):
        """
        Called at resource creation to ensure that id_perms is present in obj

        On create (obj_uuid is None) a missing id_perms is filled with
        defaults. On update, defaults are overlaid with the stored
        id_perms and then with the updatable fields from the request.
        """
        # retrieve object and permissions
        id_perms = self._get_default_id_perms()
        if (('id_perms' not in obj_dict) or
            (obj_dict['id_perms'] is None)):
            # Resource creation
            if obj_uuid is None:
                obj_dict['id_perms'] = id_perms
                return
            # NOTE(review): on update with no id_perms in the request,
            # obj_dict is deliberately left without id_perms — confirm
            # downstream reads it from the DB in that case.
            return
        # retrieve the previous version of the id_perms
        # from the database and update the id_perms with
        # them.
        if obj_uuid is not None:
            try:
                old_id_perms = self._db_conn.uuid_to_obj_perms(obj_uuid)
                for field, value in old_id_perms.items():
                    if value is not None:
                        id_perms[field] = value
            except NoIdError:
                pass
        # not all fields can be updated
        if obj_uuid:
            field_list = ['enable', 'description']
        else:
            field_list = ['enable', 'description', 'user_visible', 'creator']
        # Start from default and update from obj_dict
        req_id_perms = obj_dict['id_perms']
        for key in field_list:
            if key in req_id_perms:
                id_perms[key] = req_id_perms[key]
        # TODO handle perms present in req_id_perms
        obj_dict['id_perms'] = id_perms
    # end _ensure_id_perms_present
    def _get_default_id_perms(self, **kwargs):
        """Return the default id_perms as a plain dict, with any kwargs
        overriding individual fields.

        The JSON round-trip converts the Provision.defaults.perms object
        graph into nested dicts. (iteritems is Python-2-only.)
        """
        id_perms = copy.deepcopy(Provision.defaults.perms)
        id_perms_json = json.dumps(id_perms, default=lambda o: dict((k, v)
                                   for k, v in o.__dict__.iteritems()))
        id_perms_dict = json.loads(id_perms_json)
        id_perms_dict.update(kwargs)
        return id_perms_dict
    # end _get_default_id_perms
    def _ensure_perms2_present(self, obj_type, obj_uuid, obj_dict,
                               project_id=None):
        """
        Called at resource creation to ensure that perms2 is present in
        the object, with an owner derived (in priority order) from: the
        project's own uuid, the request's perms2.owner, the parent
        object's owner, the caller's project_id, or 'cloud-admin'.
        """
        # retrieve object and permissions
        perms2 = self._get_default_perms2()
        # set ownership of object to creator tenant
        if obj_type == 'project' and 'uuid' in obj_dict:
            perms2['owner'] = str(obj_dict['uuid']).replace('-', '')
        elif obj_dict.get('perms2') and obj_dict['perms2'].get('owner'):
            perms2['owner'] = obj_dict['perms2']['owner']
        elif 'fq_name' in obj_dict and obj_dict['fq_name'][:-1]:
            if 'parent_type' in obj_dict:
                parent_type = obj_dict['parent_type'].replace('-', '_')
            else:
                r_class = self.get_resource_class(obj_type)
                if (len(r_class.parent_types) != 1):
                    msg = ("Ambiguous parent to ensure permissiosn of %s, "
                           "please choose one parent type: %s" %
                           (obj_type, pformat(r_class.parent_types)))
                    raise cfgm_common.exceptions.HttpError(400, msg)
                parent_type = r_class.parent_types[0].replace('-', '_')
            if parent_type == 'domain':
                if project_id:
                    perms2['owner'] = project_id
                else:
                    perms2['owner'] = 'cloud-admin'
            else:
                # inherit ownership from the parent object
                parent_fq_name = obj_dict['fq_name'][:-1]
                parent_uuid = obj_dict.get('parent_uuid')
                try:
                    if parent_uuid is None:
                        try:
                            parent_uuid = self._db_conn.fq_name_to_uuid(
                                parent_type, parent_fq_name)
                        except NoIdError:
                            raise cfgm_common.exceptions.HttpError(
                                404, 'Name' + pformat(parent_fq_name) + ' not found')
                    ok, parent_obj_dict = self._db_conn.dbe_read(
                        parent_type, parent_uuid, obj_fields=['perms2'])
                except NoIdError as e:
                    msg = "Parent %s cannot be found: %s" % (parent_type, str(e))
                    raise cfgm_common.exceptions.HttpError(404, msg)
                perms2['owner'] = parent_obj_dict['perms2']['owner']
        elif project_id:
            perms2['owner'] = project_id
        else:
            perms2['owner'] = 'cloud-admin'
        if obj_dict.get('perms2') is None:
            # Resource creation
            if obj_uuid is None:
                obj_dict['perms2'] = perms2
                return
            # Resource already exists
            try:
                obj_dict['perms2'] = self._db_conn.uuid_to_obj_perms2(obj_uuid)
            except NoIdError:
                obj_dict['perms2'] = perms2
            return
        # retrieve the previous version of the perms2
        # from the database and update the perms2 with
        # them.
        if obj_uuid is not None:
            try:
                old_perms2 = self._db_conn.uuid_to_obj_perms2(obj_uuid)
                for field, value in old_perms2.items():
                    if value is not None:
                        perms2[field] = value
            except NoIdError:
                pass
        # Start from default and update from obj_dict
        req_perms2 = obj_dict['perms2']
        for key in req_perms2:
            perms2[key] = req_perms2[key]
        # TODO handle perms2 present in req_perms2
        obj_dict['perms2'] = perms2
        # ensure is_shared and global_access are consistent
        shared = obj_dict.get('is_shared', None)
        gaccess = obj_dict['perms2'].get('global_access', None)
        if (gaccess is not None and shared is not None and
                shared != (gaccess != 0)):
            # msg built but the error is intentionally not raised (below)
            msg = ("Inconsistent is_shared (%s a) and global_access (%s)" %
                   (shared, gaccess))
            # NOTE(ethuleau): ignore exception for the moment as it breaks the
            # Neutron use case where external network have global access but
            # is property 'is_shared' is False https://review.opencontrail.org/#/q/Id6a0c1a509d7663da8e5bc86f2c7c91c73d420a2
            # Before patch https://review.opencontrail.org/#q,I9f53c0f21983bf191b4c51318745eb348d48dd86,n,z
            # error was also ignored as all retruned errors of that method were
            # not took in account
            # raise cfgm_common.exceptions.HttpError(400, msg)
    def _get_default_perms2(self):
        """Return the default perms2 as a plain dict.

        JSON round-trip converts the Provision.defaults.perms2 object
        graph into nested dicts. (iteritems is Python-2-only.)
        """
        perms2 = copy.deepcopy(Provision.defaults.perms2)
        perms2_json = json.dumps(perms2, default=lambda o: dict((k, v)
                                 for k, v in o.__dict__.iteritems()))
        perms2_dict = json.loads(perms2_json)
        return perms2_dict
    # end _get_default_perms2
    def _db_init_entries(self):
        """Seed the database with the singleton/default config objects.

        Runs at API-server start. Each create_singleton_entry call is a
        no-op when the object already exists, so this is idempotent.
        Worker 0 additionally triggers a DB resync and (when fabric
        ansible is enabled) loads the predefined job-playbook payloads.
        """
        # create singleton defaults if they don't exist already in db
        gsc = self.create_singleton_entry(GlobalSystemConfig(
            autonomous_system=64512, config_version=CONFIG_VERSION))
        self._gsc_uuid = gsc.uuid
        gvc = self.create_singleton_entry(GlobalVrouterConfig(
            parent_obj=gsc))
        self.create_singleton_entry(Domain())
        # Global and default policy resources
        pm = self.create_singleton_entry(PolicyManagement())
        self._global_pm_uuid = pm.uuid
        aps = self.create_singleton_entry(ApplicationPolicySet(
            parent_obj=pm, all_applications=True))
        ok, result = self._db_conn.ref_update(
            ApplicationPolicySet.object_type,
            aps.uuid,
            GlobalVrouterConfig.object_type,
            gvc.uuid,
            {'attr': None},
            'ADD',
            None,
        )
        if not ok:
            msg = ("Error while referencing global vrouter config %s with the "
                   "default global application policy set %s: %s" %
                   (gvc.uuid, aps.uuid, result[1]))
            self.config_log(msg, level=SandeshLevel.SYS_ERR)
        # ip-fabric and link-local networks plus their default RIs
        ip_fab_vn = self.create_singleton_entry(
            VirtualNetwork(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1],
                           is_provider_network=True))
        self.create_singleton_entry(
            RoutingInstance(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1], ip_fab_vn,
                            routing_instance_is_default=True))
        self.create_singleton_entry(
            RoutingInstance('__default__', ip_fab_vn))
        link_local_vn = self.create_singleton_entry(
            VirtualNetwork(cfgm_common.LINK_LOCAL_VN_FQ_NAME[-1]))
        self.create_singleton_entry(
            RoutingInstance('__link_local__', link_local_vn,
                            routing_instance_is_default=True))
        # specifying alarm kwargs like contrail_alarm.py
        alarm_kwargs = {"alarm_rules":
                        {"or_list" : [
                         {"and_list": [
                           { "operand1": "UveConfigReq.err_info.*.",
                             "operation": "==",
                             "operand2": {"json_value": "True"}
                           } ]
                         } ]
                        },
                        "alarm_severity": 1,
                        "fq_name": [
                            "default-global-system-config",
                            "system-defined-bottle-request-size-limit"
                        ],
                        "id_perms": {
                            "description": "Bottle request size limit exceeded."
                        },
                        "parent_type": "global-system-config",
                        "uve_keys": {
                            "uve_key": [
                                "config-node"
                            ]
                        }
                       }
        self.create_singleton_entry(Alarm(**alarm_kwargs))
        try:
            self.create_singleton_entry(
                RoutingInstance('default-virtual-network',
                                routing_instance_is_default=True))
        except Exception as e:
            # NOTE(review): log message lacks a space between 'for' and
            # the network name — runtime string, left as-is.
            self.config_log('error while creating primary routing instance for'
                            'default-virtual-network: ' + str(e),
                            level=SandeshLevel.SYS_NOTICE)
        # Create singleton SG __no_rule__ object for openstack
        domain_obj = Domain(SG_NO_RULE_FQ_NAME[0])
        proj_obj = Project(SG_NO_RULE_FQ_NAME[1], domain_obj)
        sg_rules = PolicyEntriesType()
        id_perms = IdPermsType(enable=True,
                               description="Security group with no rules",
                               user_visible=True)
        perms2 = PermType2(owner='cloud-admin')
        perms2.set_global_access(PERMS_RX)
        sg_obj = SecurityGroup(name=SG_NO_RULE_NAME,
                               parent_obj=proj_obj,
                               security_group_entries=sg_rules.exportDict(''),
                               id_perms=id_perms.exportDict(''),
                               perms2=perms2.exportDict(''))
        self.create_singleton_entry(sg_obj)
        self.create_singleton_entry(DiscoveryServiceAssignment())
        self.create_singleton_entry(GlobalQosConfig())
        # flat-subnet ipam used by service chaining
        sc_ipam_subnet_v4 = IpamSubnetType(subnet=SubnetType('0.0.0.0', 8))
        sc_ipam_subnet_v6 = IpamSubnetType(subnet=SubnetType('::ffff', 104))
        sc_ipam_subnets = IpamSubnets([sc_ipam_subnet_v4, sc_ipam_subnet_v6])
        sc_ipam_obj = NetworkIpam('service-chain-flat-ipam',
                ipam_subnet_method="flat-subnet", ipam_subnets=sc_ipam_subnets)
        self.create_singleton_entry(sc_ipam_obj)
        # Create pre-defined tag-type
        for type_str, type_id in TagTypeNameToId.items():
            type_id_hex = "0x{:04x}".format(type_id)
            tag = TagType(name=type_str, tag_type_id=type_id_hex)
            tag.display_name = type_str
            self.create_singleton_entry(tag, user_visible=False)
        if int(self._args.worker_id) == 0:
            self._db_conn.db_resync()
        #Load init data for job playbooks like JobTemplates, Tags, etc
        if self._args.enable_fabric_ansible:
            self._load_init_data()
        # make default ipam available across tenants for backward compatability
        obj_type = 'network_ipam'
        fq_name = ['default-domain', 'default-project', 'default-network-ipam']
        obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        (ok, obj_dict) = self._db_conn.dbe_read(obj_type, obj_uuid,
                                                obj_fields=['perms2'])
        obj_dict['perms2']['global_access'] = PERMS_RX
        self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)
    # end _db_init_entries
# Load init data for job playbooks like JobTemplates, Tags, etc
    def _load_init_data(self):
        """
        This function loads init data from a data file specified by the
        argument '--fabric_ansible_dir' to the database. The data file
        must be in JSON format and follow the format below:
        {
          "data": [
             {
               "object_type": "<vnc object type name>",
               "objects": [
                 {
                    <vnc object payload>
                 },
                 ...
               ]
             },
             ...
          ]
        }
        Here is an example:
        {
          "data": [
            {
              "object_type": "tag",
              "objects": [
                {
                  "fq_name": [
                    "fabric=management_ip"
                  ],
                  "name": "fabric=management_ip",
                  "tag_type_name": "fabric",
                  "tag_value": "management_ip"
                }
              ]
            }
          ]
        }

        A top-level "refs" list (from_type/from_fq_name/to_type/
        to_fq_name entries) is also honored and applied as ADD ref
        updates after all objects are created. Any failure is logged and
        swallowed so server start is not blocked.
        """
        try:
            json_data = self._load_json_data()
            for item in json_data.get("data"):
                object_type = item.get("object_type")
                # Get the class name from object type
                cls_name = cfgm_common.utils.CamelCase(object_type)
                # Get the class object
                cls_ob = cfgm_common.utils.str_to_class(cls_name, __name__)
                # saving the objects to the database
                for obj in item.get("objects"):
                    instance_obj = cls_ob(**obj)
                    self.create_singleton_entry(instance_obj)
                # update default-global-system-config for supported_device_families
                # (uses the last instance_obj from the loop above)
                if object_type =='global-system-config':
                    fq_name = instance_obj.get_fq_name()
                    uuid = self._db_conn.fq_name_to_uuid('global_system_config', fq_name)
                    self._db_conn.dbe_update(object_type, uuid, obj)
            # apply the optional ref section
            for item in json_data.get("refs"):
                from_type = item.get("from_type")
                from_fq_name = item.get("from_fq_name")
                from_uuid = self._db_conn._object_db.fq_name_to_uuid(
                    from_type, from_fq_name
                )
                to_type = item.get("to_type")
                to_fq_name = item.get("to_fq_name")
                to_uuid = self._db_conn._object_db.fq_name_to_uuid(
                    to_type, to_fq_name
                )
                ok, result = self._db_conn.ref_update(
                    from_type,
                    from_uuid,
                    to_type,
                    to_uuid,
                    { 'attr': None },
                    'ADD',
                    None,
                )
        except Exception as e:
            self.config_log('error while loading init data: ' + str(e),
                            level=SandeshLevel.SYS_NOTICE)
    # end Load init data
# Load json data from fabric_ansible_playbooks/conf directory
    def _load_json_data(self):
        """Load predef_payloads.json from the fabric-ansible directory
        and, for job-templates, inject each template's input/output
        schema from the matching <name>_schema.json file.

        Returns the parsed (and augmented) JSON document.
        """
        # open the json file
        with open(self._args.fabric_ansible_dir +
                  '/conf/predef_payloads.json') as data_file:
            input_json = json.load(data_file)
        # Loop through the json
        for item in input_json.get("data"):
            if item.get("object_type") == "job-template":
                # NOTE: 'object' shadows the builtin; kept as-is.
                for object in item.get("objects"):
                    fq_name = object.get("fq_name")[-1]
                    schema_name = fq_name.replace('template', 'schema.json')
                    # NOTE(review): opened 'r+' but only read — confirm
                    # read-only mode would suffice.
                    with open(os.path.join(self._args.fabric_ansible_dir +
                              '/schema/', schema_name), 'r+') as schema_file:
                        schema_json = json.load(schema_file)
                        object["job_template_input_schema"] = schema_json.get(
                            "input_schema")
                        object["job_template_output_schema"] = schema_json.get(
                            "output_schema")
        return input_json
    # end load json data
# generate default rbac group rule
    def _create_default_rbac_rule(self):
        """Ensure the global default-api-access-list exists and contains
        all baseline RBAC rules.

        If the list already exists, any missing default rule is appended
        (bug 1642464); otherwise the list is created from scratch.
        """
        # allow full access to cloud admin
        rbac_rules = [
            {
                'rule_object':'fqname-to-id',
                'rule_field': '',
                'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
            },
            {
                'rule_object':'id-to-fqname',
                'rule_field': '',
                'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
            },
            {
                'rule_object':'useragent-kv',
                'rule_field': '',
                'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
            },
            {
                'rule_object':'documentation',
                'rule_field': '',
                'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
            },
            {
                'rule_object':'/',
                'rule_field': '',
                'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
            },
        ]
        obj_type = 'api_access_list'
        fq_name = ['default-global-system-config', 'default-api-access-list']
        try:
            # ensure global list is not missing any default rules (bug 1642464)
            # NOTE: 'id' shadows the builtin; kept as-is.
            id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
            (ok, obj_dict) = self._db_conn.dbe_read(obj_type, id)
            update_obj = False
            cur_rbac_rules = copy.deepcopy(obj_dict['api_access_list_entries']['rbac_rule'])
            for rule in rbac_rules:
                present = False
                for existing_rule in cur_rbac_rules:
                    if rule == existing_rule:
                        present = True
                        # remove the match so duplicates are not re-counted
                        cur_rbac_rules.remove(existing_rule)
                        break
                if not present:
                    obj_dict['api_access_list_entries']['rbac_rule'].append(rule)
                    update_obj = True
            if update_obj:
                self._db_conn.dbe_update(obj_type, id, obj_dict)
            return
        except NoIdError:
            # list doesn't exist yet; fall through and create it
            pass
        rge = RbacRuleEntriesType([])
        for rule in rbac_rules:
            rule_perms = [RbacPermType(role_name=p['role_name'], role_crud=p['role_crud']) for p in rule['rule_perms']]
            rbac_rule = RbacRuleType(rule_object=rule['rule_object'],
                rule_field=rule['rule_field'], rule_perms=rule_perms)
            rge.add_rbac_rule(rbac_rule)
        rge_dict = rge.exportDict('')
        glb_rbac_cfg = ApiAccessList(parent_type='global-system-config',
            fq_name=fq_name, api_access_list_entries = rge_dict)
        try:
            self.create_singleton_entry(glb_rbac_cfg)
        except Exception as e:
            err_msg = 'Error creating default api access list object'
            err_msg += cfgm_common.utils.detailed_traceback()
            self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
    # end _create_default_rbac_rule
def _resync_domains_projects(self, ext):
if hasattr(ext.obj, 'resync_domains_projects'):
ext.obj.resync_domains_projects()
# end _resync_domains_projects
    def create_singleton_entry(self, singleton_obj, user_visible=True):
        """Create singleton_obj in the DB if it does not exist yet.

        Idempotent: when the fq_name already resolves to a uuid, only
        s_obj.uuid is filled in. On create, default id_perms/perms2 are
        supplied when the object carries none, virtual-networks get a VN
        id and tags get a tag id allocated, and default children are
        created. Returns the (possibly updated) singleton object.
        """
        s_obj = singleton_obj
        obj_type = s_obj.object_type
        fq_name = s_obj.get_fq_name()
        # TODO remove backward compat create mapping in zk
        # for singleton START
        try:
            cass_uuid = self._db_conn._object_db.fq_name_to_uuid(obj_type, fq_name)
            try:
                zk_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
            except NoIdError:
                # doesn't exist in zookeeper but does so in cassandra,
                # migrate this info to zookeeper
                self._db_conn._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name, str(cass_uuid))
        except NoIdError:
            # doesn't exist in cassandra as well as zookeeper, proceed normal
            pass
        # TODO backward compat END
        # create if it doesn't exist yet
        try:
            s_obj.uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
        except NoIdError:
            obj_json = json.dumps(s_obj, default=_obj_serializer_all)
            obj_dict = json.loads(obj_json)
            if s_obj.get_id_perms():
                obj_dict['id_perms'] = s_obj.get_id_perms()
            else:
                obj_dict['id_perms'] = self._get_default_id_perms(
                    user_visible=user_visible)
            if s_obj.get_perms2():
                obj_dict['perms2'] = s_obj.get_perms2()
            else:
                obj_dict['perms2'] = self._get_default_perms2()
            (ok, result) = self._db_conn.dbe_alloc(obj_type, obj_dict)
            obj_id = result
            s_obj.uuid = obj_id
            # For virtual networks, allocate an ID
            if obj_type == 'virtual_network':
                vn_id = self.alloc_vn_id(s_obj.get_fq_name_str())
                obj_dict['virtual_network_network_id'] = vn_id
            if obj_type == 'tag':
                obj_dict = self._allocate_tag_id(obj_dict)
            self._db_conn.dbe_create(obj_type, obj_id, obj_dict)
            self.create_default_children(obj_type, s_obj)
        return s_obj
    # end create_singleton_entry
# allocate tag id for tag object
    def _allocate_tag_id(self, obj_dict):
        """Attach a tag-type ref to a tag obj_dict and compose its
        'tag_id' from the type id and a zookeeper-allocated value id.

        Returns the mutated obj_dict.
        """
        type_str = obj_dict['tag_type_name']
        # NOTE(review): value_str is assigned but never used — confirm
        # it can be removed.
        value_str = obj_dict['tag_value']
        ok, result = vnc_cfg_types.TagTypeServer.locate(
            [type_str], id_perms=IdPermsType(user_visible=False))
        tag_type = result
        obj_dict['tag_type_refs'] = [
            {
                'uuid': tag_type['uuid'],
                'to': tag_type['fq_name'],
            },
        ]
        # Allocate ID for tag value. Use the all fq_name to distinguish same
        # tag values between global and scoped
        value_id = vnc_cfg_types.TagServer.vnc_zk_client.alloc_tag_value_id(
            type_str, ':'.join(obj_dict['fq_name']))
        # Compose Tag ID with the type ID and value ID
        obj_dict['tag_id'] = "{}{:04x}".format(tag_type['tag_type_id'],
                                               value_id)
        return obj_dict
    # end allocate tag id
def _validate_page_marker(self, req_page_marker):
# query params always appears as string
if req_page_marker and req_page_marker.lower() != 'none':
try:
req_page_marker_uuid = req_page_marker.split(':')[-1]
_ = str(uuid.UUID(req_page_marker_uuid))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_marker %s: %s' %(
req_page_marker, e))
else:
req_page_marker = None
return req_page_marker
# end _validate_page_marker
def _validate_page_limit(self, req_page_limit):
try:
val = int(req_page_limit)
if val <= 0:
raise Exception("page_limit has to be greater than zero")
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid page_limit %s: %s' %(
req_page_limit, e))
return int(req_page_limit)
# end _validate_page_limit
    def _list_collection(self, obj_type, parent_uuids=None,
                         back_ref_uuids=None, obj_uuids=None,
                         is_count=False, is_detail=False, filters=None,
                         req_fields=None, include_shared=False,
                         exclude_hrefs=False, pagination=None):
        """Shared implementation behind all resource list APIs.

        Pages through dbe_list until the requested page is filled (or no
        pagination was requested), applying per-object visibility and
        RBAC read checks for non-admin callers, and optionally trimming
        results to the requested fields and adding hrefs.

        Returns {'<type>s': <results or {'count': N}>}, plus a 'marker'
        key when the caller paginates.
        """
        resource_type, r_class = self._validate_resource_type(obj_type)
        is_admin = self.is_admin_request()
        if is_admin:
            field_names = req_fields
        else:
            # id_perms is always needed for the visibility checks below
            field_names = [u'id_perms'] + (req_fields or [])
        if is_count and is_admin:
            ret_result = 0
        else:
            ret_result = []
        page_filled = False
        if 'marker' in pagination:
            # if marker is None, start scanning from uuid 0
            page_start = pagination['marker'] or '0'
            if 'limit' in pagination:
                page_count = pagination['limit']
            else:
                page_count = self._args.paginate_count
        else:
            page_start = None  # cookie to start next search
            page_count = None  # remainder count to finish page
        (ok, result) = r_class.pre_dbe_list(obj_uuids, self._db_conn)
        if not ok:
            (code, msg) = result
            raise cfgm_common.exceptions.HttpError(code, msg)
        while not page_filled:
            (ok, result, ret_marker) = self._db_conn.dbe_list(obj_type,
                parent_uuids, back_ref_uuids, obj_uuids, is_count and is_admin,
                filters, is_detail=is_detail, field_names=field_names,
                include_shared=include_shared,
                paginate_start=page_start,
                paginate_count=page_count)
            if not ok:
                self.config_object_error(None, None, '%ss' %(obj_type),
                                         'dbe_list', result)
                raise cfgm_common.exceptions.HttpError(404, result)
            # If only counting, return early
            if is_count and is_admin:
                ret_result += result
                return {'%ss' %(resource_type): {'count': ret_result}}
            allowed_fields = ['uuid', 'href', 'fq_name'] + (req_fields or [])
            obj_dicts = []
            if is_admin:
                # admin: no per-object permission filtering
                for obj_result in result:
                    if not exclude_hrefs:
                        obj_result['href'] = self.generate_url(
                            resource_type, obj_result['uuid'])
                    if is_detail:
                        obj_result['name'] = obj_result['fq_name'][-1]
                        obj_dicts.append({resource_type: obj_result})
                    else:
                        obj_dicts.append(obj_result)
            else:
                for obj_result in result:
                    id_perms = obj_result.get('id_perms')
                    if not id_perms:
                        # It is possible that the object was deleted, but received
                        # an update after that. We need to ignore it for now. In
                        # future, we should clean up such stale objects
                        continue
                    if not id_perms.get('user_visible', True):
                        # skip items not authorized
                        continue
                    (ok, status) = self._permissions.check_perms_read(
                        get_request(), obj_result['uuid'],
                        obj_result)
                    if not ok and status[0] == 403:
                        continue
                    obj_dict = {}
                    if is_detail:
                        obj_result = self.obj_view(resource_type, obj_result)
                        obj_result['name'] = obj_result['fq_name'][-1]
                        obj_dict.update(obj_result)
                        obj_dicts.append({resource_type: obj_dict})
                    else:
                        obj_dict.update(obj_result)
                        # NOTE(review): mutating while iterating keys() is
                        # safe only under Python 2 (keys() is a list) —
                        # would need list(obj_dict) under Python 3.
                        for key in obj_dict.keys():
                            if not key in allowed_fields:
                                del obj_dict[key]
                        if obj_dict.get('id_perms') and not 'id_perms' in allowed_fields:
                            del obj_dict['id_perms']
                        obj_dicts.append(obj_dict)
                    if not exclude_hrefs:
                        # obj_dict is already appended; this mutates it in place
                        obj_dict['href'] = self.generate_url(resource_type, obj_result['uuid'])
                # end obj_result in result
            # end not admin req
            ret_result.extend(obj_dicts)
            if 'marker' not in pagination:
                page_filled = True
            elif ret_marker is None: # pagination request and done
                page_filled = True
            else: # pagination request and partially filled
                page_start = ret_marker
                page_count -= len(result)
                if page_count <= 0:
                    page_filled = True
        # end while not page_filled
        (ok, err_msg) = r_class.post_dbe_list(ret_result, self._db_conn)
        if not ok:
            (code, msg) = err_msg
            raise cfgm_common.exceptions.HttpError(code, msg)
        if 'marker' in pagination: # send next marker along with results
            if is_count:
                return {'%ss' %(resource_type): {'count': len(ret_result)},
                        'marker': ret_marker}
            else:
                return {'%ss' %(resource_type): ret_result,
                        'marker': ret_marker}
        if is_count:
            return {'%ss' %(resource_type): {'count': len(ret_result)}}
        else:
            return {'%ss' %(resource_type): ret_result}
    # end _list_collection
    def get_db_connection(self):
        """Return the server's VncDbClient instance."""
        return self._db_conn
    # end get_db_connection
def generate_url(self, resource_type, obj_uuid):
try:
url_parts = get_request().urlparts
netloc = url_parts.netloc.replace('<script>', '<!--script>')
netloc = netloc.replace('</script>', '</script-->')
return '%s://%s/%s/%s'\
% (url_parts.scheme, netloc, resource_type, obj_uuid)
except Exception as e:
return '%s/%s/%s' % (self._base_url, resource_type, obj_uuid)
# end generate_url
    def generate_hrefs(self, resource_type, obj_dict):
        """Return a copy of obj_dict with 'href' keys for the object
        itself, its parent, its children, refs and backrefs.

        obj_dict is never mutated, since it may be a cached object; each
        child/ref/backref entry is shallow-copied with its href added.
        """
        r_class = self.get_resource_class(resource_type)
        ret_obj_dict = obj_dict.copy()
        ret_obj_dict['href'] = self.generate_url(
            resource_type, obj_dict['uuid'])
        try:
            ret_obj_dict['parent_href'] = self.generate_url(
                obj_dict['parent_type'], obj_dict['parent_uuid'])
        except KeyError:
            # No parent
            pass
        for child_field, child_field_info in \
                r_class.children_field_types.items():
            try:
                children = obj_dict[child_field]
                child_type = child_field_info[0]
                ret_obj_dict[child_field] = [
                    dict(c, href=self.generate_url(child_type, c['uuid']))
                    for c in children]
            except KeyError:
                # child_field doesn't exist in original
                pass
        # end for all child fields
        for ref_field, ref_field_info in r_class.ref_field_types.items():
            try:
                refs = obj_dict[ref_field]
                ref_type = ref_field_info[0]
                ret_obj_dict[ref_field] = [
                    dict(r, href=self.generate_url(ref_type, r['uuid']))
                    for r in refs]
            except KeyError:
                # ref_field doesn't exist in original
                pass
        # end for all ref fields
        for backref_field, backref_field_info in \
                r_class.backref_field_types.items():
            try:
                backrefs = obj_dict[backref_field]
                backref_type = backref_field_info[0]
                ret_obj_dict[backref_field] = [
                    dict(b, href=self.generate_url(backref_type, b['uuid']))
                    for b in backrefs]
            except KeyError:
                # backref_field doesn't exist in original
                pass
        # end for all backref fields
        return ret_obj_dict
    # end generate_hrefs
    def config_object_error(self, id, fq_name_str, obj_type,
                            operation, err_str):
        """Emit a VncApiConfigLog sandesh message recording a failed
        config operation on an object.

        :param id: object uuid (may be None; note: shadows the builtin)
        :param fq_name_str: colon-joined fq_name (may be None)
        :param obj_type: resource type string (may be None)
        :param operation: API operation name, e.g. 'http_put'
        :param err_str: error detail, logged as '<obj_type>:<err_str>'
        """
        apiConfig = VncApiCommon()
        if obj_type is not None:
            apiConfig.object_type = obj_type
        apiConfig.identifier_name = fq_name_str
        apiConfig.identifier_uuid = id
        apiConfig.operation = operation
        if err_str:
            apiConfig.error = "%s:%s" % (obj_type, err_str)
        self._set_api_audit_info(apiConfig)
        log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
        log.send(sandesh=self._sandesh)
    # end config_object_error
    def config_log(self, msg_str, level=SandeshLevel.SYS_INFO):
        """Send msg_str to the collector at the given sandesh level.

        Levels map to dedicated sandesh message classes; any unknown
        level falls back to VncApiError.
        """
        errcls = {
            SandeshLevel.SYS_DEBUG: VncApiDebug,
            SandeshLevel.SYS_INFO: VncApiInfo,
            SandeshLevel.SYS_NOTICE: VncApiNotice,
            SandeshLevel.SYS_ERR: VncApiError,
        }
        errcls.get(level, VncApiError)(
            api_msg=msg_str, level=level, sandesh=self._sandesh).send(
                sandesh=self._sandesh)
    # end config_log
    def _set_api_audit_info(self, apiConfig):
        """Populate audit fields (url, caller identity, body) on an
        apiConfig sandesh object from the current request's headers.

        The X-* identity headers are those set by the auth middleware;
        a missing/'none' domain defaults to 'default-domain'.
        """
        apiConfig.url = get_request().url
        # NOTE(review): 'Host' is the server-side host header, not the
        # client address — confirm this is the intended remote_ip.
        apiConfig.remote_ip = get_request().headers.get('Host')
        useragent = get_request().headers.get('X-Contrail-Useragent')
        if not useragent:
            useragent = get_request().headers.get('User-Agent')
        apiConfig.useragent = useragent
        apiConfig.user = get_request().headers.get('X-User-Name')
        apiConfig.project = get_request().headers.get('X-Project-Name')
        apiConfig.domain = get_request().headers.get('X-Domain-Name', 'None')
        if apiConfig.domain.lower() == 'none':
            apiConfig.domain = 'default-domain'
        if int(get_request().headers.get('Content-Length', 0)) > 0:
            try:
                body = json.dumps(get_request().json)
            except:
                # body may not be JSON-serializable; log its repr instead
                body = str(get_request().json)
            apiConfig.body = body
    # end _set_api_audit_info
# uuid is parent's for collections
def _get_common(self, request, uuid=None):
# TODO check api + resource perms etc.
if self.is_auth_needed() and uuid:
if isinstance(uuid, list):
for u_id in uuid:
ok, result = self._permissions.check_perms_read(request,
u_id)
if not ok:
return ok, result
else:
return self._permissions.check_perms_read(request, uuid)
return (True, '')
# end _get_common
    def _put_common(
            self, api_name, obj_type, obj_uuid, db_obj_dict, req_obj_dict=None,
            req_prop_coll_updates=None, ref_args=None, quota_dict=None):
        """Common handling for every resource update (PUT / ref-update /
        prop-collection update).

        Validates the request (visibility, properties, references,
        permissions), emits an audit log, then runs the stateful update
        pipeline (pre_dbe_update hook, DB write, post_dbe_update hook),
        undoing and reverting quota counters on failure.

        :param api_name: operation name ('put', 'ref-update', ...)
        :param db_obj_dict: current object state read from the DB
        :param req_obj_dict: full replacement dict (PUT), or None
        :param req_prop_coll_updates: prop-collection deltas, or None
        :param ref_args: args dict for the 'ref-update' operation
        :raises cfgm_common.exceptions.HttpError: on any validation or
            update failure
        """
        obj_fq_name = db_obj_dict.get('fq_name', 'missing-fq-name')
        # ZK and rabbitmq should be functional
        self._ensure_services_conn(
            api_name, obj_type, obj_uuid, obj_fq_name)

        resource_type, r_class = self._validate_resource_type(obj_type)
        try:
            # Give registered extensions a chance to veto/adjust the update.
            self._extension_mgrs['resourceApi'].map_method(
                'pre_%s_update' %(obj_type), obj_uuid, req_obj_dict)
        except RuntimeError:
            # lack of registered extension leads to RuntimeError
            pass
        except Exception as e:
            err_msg = 'In pre_%s_update an extension had error for %s' \
                      %(obj_type, req_obj_dict)
            err_msg += cfgm_common.utils.detailed_traceback()
            self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)

        db_conn = self._db_conn

        # check visibility: hidden objects are only mutable by admins
        if (not db_obj_dict['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % obj_uuid
            self.config_object_error(obj_uuid, None, obj_type, api_name, result)
            raise cfgm_common.exceptions.HttpError(404, result)

        # properties validator (for collections validation in caller)
        if req_obj_dict is not None:
            ok, result = self._validate_props_in_request(r_class,
                         req_obj_dict, operation='UPDATE')
            if not ok:
                result = 'Bad property in %s: %s' %(api_name, result)
                raise cfgm_common.exceptions.HttpError(400, result)

        # references validator
        if req_obj_dict is not None:
            ok, result = self._validate_refs_in_request(r_class, req_obj_dict)
            if not ok:
                result = 'Bad reference in %s: %s' %(api_name, result)
                raise cfgm_common.exceptions.HttpError(400, result)

        # common handling for all resource put
        request = get_request()
        fq_name_str = ":".join(obj_fq_name or [])
        if req_obj_dict:
            if ('id_perms' in req_obj_dict and
                    req_obj_dict['id_perms'].get('uuid')):
                # Client-supplied uuid must match the path uuid; on mismatch
                # log the caller and force the path uuid onto the request.
                if not self._db_conn.match_uuid(req_obj_dict, obj_uuid):
                    msg = (
                        "UUID mismatch from %s:%s" %
                        (request.environ.get('REMOTE_ADDR',
                                             "Remote address not found"),
                         request.environ.get('HTTP_USER_AGENT',
                                             "User agent not found"))
                    )
                    self.config_object_error(
                        obj_uuid, fq_name_str, obj_type, 'put', msg)
                    self._db_conn.set_uuid(obj_type, req_obj_dict,
                                           uuid.UUID(obj_uuid),
                                           do_lock=False)

            # Ensure object has at least default permissions set
            self._ensure_id_perms_present(obj_uuid, req_obj_dict)

            apiConfig = VncApiCommon()
            apiConfig.object_type = obj_type
            apiConfig.identifier_name = fq_name_str
            apiConfig.identifier_uuid = obj_uuid
            apiConfig.operation = api_name
            self._set_api_audit_info(apiConfig)
            log = VncApiConfigLog(api_log=apiConfig,
                                  sandesh=self._sandesh)
            log.send(sandesh=self._sandesh)

        if self.is_auth_needed():
            ok, result = self._permissions.check_perms_write(request, obj_uuid)
            if not ok:
                (code, msg) = result
                self.config_object_error(
                    obj_uuid, fq_name_str, obj_type, api_name, msg)
                raise cfgm_common.exceptions.HttpError(code, msg)

        # Validate perms on references
        if req_obj_dict is not None:
            try:
                self._validate_perms_in_request(
                    r_class, obj_type, req_obj_dict)
            except NoIdError:
                raise cfgm_common.exceptions.HttpError(400,
                    'Unknown reference in resource update %s %s.'
                    %(obj_type, req_obj_dict))

        # State modification starts from here. Ensure that cleanup is done for all state changes
        # NOTE(review): cleanup_on_failure appears unused in this method —
        # failures are handled via self.undo() below; confirm before removal.
        cleanup_on_failure = []
        if req_obj_dict is not None:
            req_obj_dict['uuid'] = obj_uuid

        # Permit abort resource update and return 202 status code
        get_context().set_state('PENDING_DBE_UPDATE')
        ok, result = r_class.pending_dbe_update(db_obj_dict, req_obj_dict,
                                                req_prop_coll_updates)
        if not ok:
            code, msg = result
            raise cfgm_common.exceptions.HttpError(code, msg)
        if ok and isinstance(result, tuple) and result[0] == 202:
            # Modifications accepted but not applied, pending update
            # returns 202 HTTP OK code to aware clients
            bottle.response.status = 202
            return True, ''

        def stateful_update():
            # Runs the pre-hook, DB write and post-hook; returns (ok, result).
            get_context().set_state('PRE_DBE_UPDATE')
            # type-specific hook
            (ok, result) = r_class.pre_dbe_update(
                obj_uuid, obj_fq_name, req_obj_dict or {}, self._db_conn,
                prop_collection_updates=req_prop_coll_updates)
            if not ok:
                return (ok, result)
            attr_to_publish = None
            if isinstance(result, dict):
                attr_to_publish = result

            get_context().set_state('DBE_UPDATE')
            if api_name == 'ref-update':
                # read ref_update args
                ref_obj_type = ref_args.get('ref_obj_type')
                ref_uuid = ref_args.get('ref_uuid')
                ref_data = ref_args.get('data')
                operation = ref_args.get('operation')
                relax_ref_for_delete = ref_args.get('relax_ref_for_delete', False)

                (ok, result) = db_conn.ref_update(
                    obj_type,
                    obj_uuid,
                    ref_obj_type,
                    ref_uuid,
                    ref_data,
                    operation,
                    db_obj_dict['id_perms'],
                    attr_to_publish=attr_to_publish,
                    relax_ref_for_delete=relax_ref_for_delete
                )
            elif req_obj_dict:
                (ok, result) = db_conn.dbe_update(
                    obj_type,
                    obj_uuid,
                    req_obj_dict,
                    attr_to_publish=attr_to_publish,
                )
                # Update quota counter
                if resource_type == 'project' and 'quota' in req_obj_dict:
                    proj_id = req_obj_dict['uuid']
                    quota_dict = req_obj_dict['quota']
                    path_prefix = self._path_prefix + proj_id
                    try:
                        QuotaHelper._zk_quota_counter_update(
                            path_prefix, quota_dict, proj_id, db_conn,
                            self.quota_counter)
                    except NoIdError:
                        msg = "Error in initializing quota "\
                              "Internal error : Failed to read resource count"
                        self.config_log(msg, level=SandeshLevel.SYS_ERR)
            elif req_prop_coll_updates:
                (ok, result) = db_conn.prop_collection_update(
                    obj_type,
                    obj_uuid,
                    req_prop_coll_updates,
                    attr_to_publish=attr_to_publish,
                )
            if not ok:
                return (ok, result)

            get_context().set_state('POST_DBE_UPDATE')
            # type-specific hook
            (ok, result) = r_class.post_dbe_update(
                obj_uuid, obj_fq_name, req_obj_dict or {}, self._db_conn,
                prop_collection_updates=req_prop_coll_updates)
            if not ok:
                return (ok, result)

            return (ok, result)
        # end stateful_update

        try:
            ok, result = stateful_update()
        except Exception as e:
            ok = False
            err_msg = cfgm_common.utils.detailed_traceback()
            result = (500, err_msg)
        if not ok:
            self.undo(result, obj_type, id=obj_uuid)
            # Revert changes made to quota counter by using DB quota dict
            # NOTE(review): req_obj_dict may be None here (ref-update or
            # prop-collection failure path) — 'quota' in None would raise
            # TypeError; confirm and guard if reachable.
            if resource_type == 'project' and 'quota' in req_obj_dict:
                proj_id = db_obj_dict['uuid']
                quota_dict = db_obj_dict.get('quota') or None
                path_prefix = self._path_prefix + proj_id
                try:
                    QuotaHelper._zk_quota_counter_update(
                        path_prefix, quota_dict, proj_id, self._db_conn,
                        self.quota_counter)
                except NoIdError:
                    err_msg = "Error in rolling back quota count on undo "\
                              "Internal error : Failed to read resource count"
                    self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
            code, msg = result
            raise cfgm_common.exceptions.HttpError(code, msg)

        try:
            # Post-update extension hook (best-effort; errors only logged).
            self._extension_mgrs['resourceApi'].map_method(
                'post_%s_update' %(obj_type), obj_uuid,
                req_obj_dict, db_obj_dict)
        except RuntimeError:
            # lack of registered extension leads to RuntimeError
            pass
        except Exception as e:
            err_msg = 'In post_%s_update an extension had error for %s' \
                      %(obj_type, req_obj_dict)
            err_msg += cfgm_common.utils.detailed_traceback()
            self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
    # end _put_common
# parent_type needed for perms check. None for derived objects (eg.
# routing-instance)
def _delete_common(self, request, obj_type, uuid, parent_uuid):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
fq_name = self._db_conn.uuid_to_fq_name(uuid)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = uuid
apiConfig.operation = 'delete'
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# TODO check api + resource perms etc.
if not self.is_auth_needed() or not parent_uuid:
return (True, '')
"""
Validate parent allows write access. Implicitly trust
parent info in the object since coming from our DB.
"""
return self._permissions.check_perms_delete(request, obj_type, uuid,
parent_uuid)
# end _http_delete_common
def _post_validate(self, obj_type=None, obj_dict=None):
if not obj_dict:
return
def _check_field_present(fname):
fval = obj_dict.get(fname)
if not fval:
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, no %s in POST body" %(fname))
return fval
fq_name = _check_field_present('fq_name')
# well-formed name checks
if illegal_xml_chars_RE.search(fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has illegal xml characters")
if obj_type == 'route_target':
invalid_chars = self._INVALID_NAME_CHARS - set(':')
else:
invalid_chars = self._INVALID_NAME_CHARS
if any((c in invalid_chars) for c in fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has one of invalid chars %s"
%(invalid_chars))
# end _post_validate
def validate_parent_type(self, obj_type, obj_dict):
parent_type = obj_dict.get('parent_type')
r_class = self.get_resource_class(obj_type)
allowed_parent_types = r_class.parent_types
if parent_type:
if parent_type not in allowed_parent_types:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid parent type: %s. Allowed types: %s' % (
parent_type, allowed_parent_types))
elif (len(allowed_parent_types) > 1 and
'config-root' not in allowed_parent_types):
raise cfgm_common.exceptions.HttpError(
400, 'Missing parent type: %s. Allowed types: %s' % (
parent_type, allowed_parent_types))
elif len(allowed_parent_types) == 1:
parent_type = allowed_parent_types[0]
if parent_type in ('config-root', None):
if len(obj_dict['fq_name']) != 1:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid fq-name of an object with no parent: %s' % (
obj_dict['fq_name']))
elif len(obj_dict['fq_name']) < 2:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid fq-name for object with parent_type %s: %s' % (
parent_type, obj_dict['fq_name']))
# end validate_parent_type
def _post_common(self, obj_type, obj_dict):
self._ensure_services_conn(
'http_post', obj_type, obj_fq_name=obj_dict.get('fq_name'))
if not obj_dict:
# TODO check api + resource perms etc.
return (True, None)
# Fail if object exists already
try:
obj_uuid = self._db_conn.fq_name_to_uuid(
obj_type, obj_dict['fq_name'])
raise cfgm_common.exceptions.HttpError(
409, '' + pformat(obj_dict['fq_name']) +
' already exists with uuid: ' + obj_uuid)
except NoIdError:
pass
self.validate_parent_type(obj_type, obj_dict)
# Ensure object has at least default permissions set
self._ensure_id_perms_present(None, obj_dict)
self._ensure_perms2_present(obj_type, None, obj_dict,
get_request().headers.environ.get('HTTP_X_PROJECT_ID', None))
# TODO check api + resource perms etc.
uuid_in_req = obj_dict.get('uuid', None)
# Set the display name
if (('display_name' not in obj_dict) or
(obj_dict['display_name'] is None)):
obj_dict['display_name'] = obj_dict['fq_name'][-1]
fq_name_str = ":".join(obj_dict['fq_name'])
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=fq_name_str
apiConfig.identifier_uuid = uuid_in_req
apiConfig.operation = 'post'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
if uuid_in_req:
if uuid_in_req != str(uuid.UUID(uuid_in_req)):
bottle.abort(400, 'Invalid UUID format: ' + uuid_in_req)
try:
fq_name = self._db_conn.uuid_to_fq_name(uuid_in_req)
raise cfgm_common.exceptions.HttpError(
409, uuid_in_req + ' already exists with fq_name: ' +
pformat(fq_name))
except NoIdError:
pass
apiConfig.identifier_uuid = uuid_in_req
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return (True, uuid_in_req)
# end _post_common
def reset(self):
# cleanup internal state/in-flight operations
if self._db_conn:
self._db_conn.reset()
# end reset
# allocate block of IP addresses from VN. Subnet info expected in request
# body
def vn_ip_alloc_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
# expected format {"subnet_list" : "192.168.127.12/24", "count" : 4}
req_dict = get_request().json
count = req_dict.get('count', 1)
subnet = req_dict.get('subnet')
family = req_dict.get('family')
try:
result = vnc_cfg_types.VirtualNetworkServer.ip_alloc(
vn_fq_name, subnet, count, family)
except vnc_addr_mgmt.AddrMgmtSubnetUndefined as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except vnc_addr_mgmt.AddrMgmtSubnetExhausted as e:
raise cfgm_common.exceptions.HttpError(409, str(e))
return result
# end vn_ip_alloc_http_post
# free block of ip addresses to subnet
def vn_ip_free_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
"""
{
"subnet" : "192.168.127.12/24",
"ip_addr": [ "192.168.3.11", "192.168.3.11", "192.168.127.12", "172.16.31.10" ]
}
"""
req_dict = get_request().json
ip_list = req_dict['ip_addr'] if 'ip_addr' in req_dict else []
result = vnc_cfg_types.VirtualNetworkServer.ip_free(
vn_fq_name, ip_list)
return result
# end vn_ip_free_http_post
# return no. of IP addresses from VN/Subnet
def vn_subnet_ip_count_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
# expected format {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"]
req_dict = get_request().json
try:
(ok, result) = self._db_conn.dbe_read('virtual_network', id)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception as e:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
obj_dict = result
subnet_list = req_dict[
'subnet_list'] if 'subnet_list' in req_dict else []
result = vnc_cfg_types.VirtualNetworkServer.subnet_ip_count(
vn_fq_name, subnet_list)
return result
# end vn_subnet_ip_count_http_post
    # check if token validation is needed
def is_auth_needed(self):
return self.aaa_mode != 'no-auth'
def is_rbac_enabled(self):
return self.aaa_mode == 'rbac'
    @property
    def aaa_mode(self):
        # Authentication/authorization mode string, backed by the parsed
        # server arguments (valid values enforced in aaa_mode_http_put).
        return self._args.aaa_mode
    @aaa_mode.setter
    def aaa_mode(self, mode):
        # Persist the new mode onto the parsed arguments so every reader
        # of self._args observes the change.
        self._args.aaa_mode = mode
# indication if multi tenancy with rbac is enabled or disabled
def aaa_mode_http_get(self):
return {'aaa-mode': self.aaa_mode}
    def aaa_mode_http_put(self):
        """REST handler: change the server aaa-mode at runtime.

        Requires a valid admin token; seeds default RBAC rules when
        switching into rbac mode.

        :raises ValueError: on an unknown aaa-mode value
        :raises cfgm_common.exceptions.HttpError: 401/403 on auth failure
        """
        aaa_mode = get_request().json['aaa-mode']
        if aaa_mode not in AAA_MODE_VALID_VALUES:
            raise ValueError('Invalid aaa-mode %s' % aaa_mode)

        # Only an authenticated admin may flip the mode.
        ok, result = self._auth_svc.validate_user_token()
        if not ok:
            code, msg = result
            self.config_object_error(None, None, None, 'aaa_mode_http_put',
                                     msg)
            raise cfgm_common.exceptions.HttpError(code, msg)
        if not self.is_admin_request():
            raise cfgm_common.exceptions.HttpError(403, " Permission denied")

        self.aaa_mode = aaa_mode
        if self.is_rbac_enabled():
            # Seed the default RBAC rule set when entering rbac mode.
            self._create_default_rbac_rule()
        return {'aaa-mode': self.aaa_mode}
    # end
    @property
    def cloud_admin_role(self):
        # Role name granted cloud-admin privileges, from configuration.
        return self._args.cloud_admin_role
    @property
    def global_read_only_role(self):
        # Role name granted global read-only access, from configuration.
        return self._args.global_read_only_role
    def set_tag(self):
        """REST handler: set/unset tag references on an arbitrary object.

        Request body: obj_type, obj_uuid, then one entry per tag type.
        A null entry removes all refs of that tag type; otherwise 'value'
        (unique-per-object types) or 'add_values'/'delete_values'
        (multi-value types) adjust the refs. 'is_global' selects
        globally-scoped tags.

        :raises cfgm_common.exceptions.HttpError: 400 on bad input,
            404 when a tag or owner cannot be resolved
        """
        self._post_common(None, {})

        req_dict = get_request().json
        obj_type = req_dict.pop('obj_type')
        obj_uuid = req_dict.pop('obj_uuid')
        need_update = False

        if obj_type is None or obj_uuid is None:
            msg = "Object type and UUID must be specified"
            raise cfgm_common.exceptions.HttpError(400, msg)

        ok, result = self._db_conn.dbe_read(
            obj_type,
            obj_uuid,
            obj_fields=['parent_type', 'perms2', 'tag_refs'],
        )
        if not ok:
            raise cfgm_common.exceptions.HttpError(*result)
        obj_dict = result

        # NOTE(review): parameter name 'type' shadows the builtin.
        def _locate_tag(type, value, is_global=False):
            # Resolve (and validate existence of) the tag "<type>=<value>"
            # in the scope implied by the tagged object; returns
            # (fq_name, uuid).
            name = type + "=" + value
            # unless global, inherit project id from caller
            if is_global:
                fq_name = [name]
            else:
                fq_name = copy.deepcopy(obj_dict['fq_name'])
                if obj_type == 'project':
                    fq_name.append(name)
                elif ('parent_type' in obj_dict and
                        obj_dict['parent_type'] == 'project'):
                    fq_name[-1] = name
                elif ('perms2' in obj_dict and
                        is_uuid_like(obj_dict['perms2']['owner'])):
                    # Scope by the owning project recorded in perms2.
                    parent_uuid = str(uuid.UUID(obj_dict['perms2']['owner']))
                    try:
                        fq_name = self._db_conn.uuid_to_fq_name(parent_uuid)
                    except NoIdError:
                        msg = ("Cannot find %s %s owner" %
                               (obj_type, obj_dict['uuid']))
                        raise cfgm_common.exceptions.HttpError(404, msg)
                    fq_name.append(name)
                else:
                    msg = ("Not able to determine the scope of the tag '%s'" %
                           name)
                    raise cfgm_common.exceptions.HttpError(404, msg)

            # lookup (validate) tag
            try:
                tag_uuid = self._db_conn.fq_name_to_uuid('tag', fq_name)
            except NoIdError:
                msg = "Tag with FQName %s not found" % pformat(fq_name)
                raise cfgm_common.exceptions.HttpError(404, msg)

            return fq_name, tag_uuid

        # Index existing refs by tag type (the part before '=' in the name).
        refs_per_type = {}
        for ref in obj_dict.get('tag_refs', []):
            ref_type = ref['to'][-1].partition('=')[0]
            refs_per_type.setdefault(ref_type, []).append(ref)

        for tag_type, attrs in req_dict.items():
            tag_type = tag_type.lower()

            # If the body of a Tag type is None, all references to that Tag
            # type are removed from the resource
            if attrs is None:
                for ref in refs_per_type.get(tag_type, []):
                    need_update = True
                    obj_dict['tag_refs'].remove(ref)
                refs_per_type[tag_type] = []
                continue

            # Else get defined values and update Tag references on the resource
            is_global = attrs.get('is_global', False)
            value = attrs.get('value')
            add_values = set(attrs.get('add_values', []))
            delete_values = set(attrs.get('delete_values', []))

            # Tag type is unique per object, unless
            # TAG_TYPE_NOT_UNIQUE_PER_OBJECT type
            if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
                if add_values or delete_values:
                    msg = ("Tag type %s cannot be set multiple times on a "
                           "same object." % tag_type)
                    raise cfgm_common.exceptions.HttpError(400, msg)

            # address-group object can only be associated with label
            if (obj_type == 'address_group' and
                    tag_type not in TAG_TYPE_AUTHORIZED_ON_ADDRESS_GROUP):
                msg = ("Invalid tag type %s for object type %s" %
                       (tag_type, obj_type))
                raise cfgm_common.exceptions.HttpError(400, msg)

            # Existing refs of this type, indexed by tag value (after '=').
            refs_per_values = {}
            if tag_type in refs_per_type:
                refs_per_values = {ref['to'][-1].partition('=')[2]: ref for ref
                                   in refs_per_type[tag_type]}

            if tag_type not in TAG_TYPE_NOT_UNIQUE_PER_OBJECT:
                if value is None or isinstance(value, list):
                    msg = "No valid value provided for tag type %s" % tag_type
                    raise cfgm_common.exceptions.HttpError(400, msg)

                # don't need to update if tag type with same value already
                # referenced
                if value in refs_per_values:
                    continue

                for ref in refs_per_values.values():
                    need_update = True
                    # object already has a reference to that tag type with a
                    # different value, remove it
                    obj_dict['tag_refs'].remove(ref)

                # finally, reference the tag type with the new value
                tag_fq_name, tag_uuid = _locate_tag(tag_type, value, is_global)
                obj_dict.setdefault('tag_refs', []).append({
                    'uuid': tag_uuid,
                    'to': tag_fq_name,
                    'attr': None,
                })
                need_update = True
            else:
                # Add 'value' attribute to 'add_values' list if not null
                if value is not None:
                    add_values.add(value)

                for add_value in add_values - set(refs_per_values.keys()):
                    need_update = True
                    tag_fq_name, tag_uuid = _locate_tag(tag_type, add_value,
                                                        is_global)
                    obj_dict.setdefault('tag_refs', []).append({
                        'uuid': tag_uuid,
                        'to': tag_fq_name,
                        'attr': None,
                    })
                for del_value in delete_values & set(refs_per_values.keys()):
                    need_update = True
                    obj_dict['tag_refs'].remove(refs_per_values[del_value])

        if need_update:
            self._db_conn.dbe_update(obj_type, obj_uuid, obj_dict)

        return {}
def security_policy_draft(self):
self._post_common(None, {})
req_dict = get_request().json
scope_uuid = req_dict.pop('scope_uuid')
action = req_dict.pop('action')
pm_class = self.get_resource_class('policy-management')
try:
scope_type = self._db_conn.uuid_to_obj_type(scope_uuid)
except NoIdError as e:
msg = ("Cannot find scope where pending security resource are "
"own: %s" % str(e))
scope_class = self.get_resource_class(scope_type)
scope_fq_name = self._db_conn.uuid_to_fq_name(scope_uuid)
pm_fq_name = [POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]
if (scope_type == GlobalSystemConfig.object_type and
scope_fq_name == GlobalSystemConfig().fq_name):
parent_type = PolicyManagement.resource_type
parent_fq_name = PolicyManagement().fq_name
parent_uuid = self._global_pm_uuid
else:
pm_fq_name = scope_fq_name + pm_fq_name
parent_type = scope_class.resource_type
parent_fq_name = scope_fq_name
parent_uuid = scope_uuid
ok, result = pm_class.locate(
fq_name=pm_fq_name,
create_it=False,
fields=['%ss' % type for type in SECURITY_OBJECT_TYPES],
)
if not ok and result[0] == 404:
# Draft dedicated policy management does not exists, the draft mode
# is not enabled on the scope
msg = ("Security draft mode is not enabled on the %s %s (%s)" %
(scope_type.replace('_', ' ').title(), scope_fq_name,
scope_uuid))
raise cfgm_common.exceptions.HttpError(400, msg)
if not ok:
raise cfgm_common.exceptions.HttpError(result[0], result[1])
pm = result
scope_lock = self._db_conn._zk_db._zk_client.write_lock(
'%s/%s/%s' % (
self.security_lock_prefix, scope_type,
':'.join(scope_fq_name)
),
'api-server-%s %s' % (socket.gethostname(), action),
)
try:
acquired_lock = scope_lock.acquire(timeout=1)
except LockTimeout:
acquired_lock = False
if acquired_lock:
try:
if action == 'commit':
self._security_commit_resources(scope_type, parent_type,
parent_fq_name,
parent_uuid, pm)
elif action == 'discard':
self._security_discard_resources(pm)
else:
msg = "Only 'commit' or 'discard' actions are supported"
raise cfgm_common.exceptions.HttpError(400, msg)
finally:
scope_lock.release()
else:
contenders = scope_lock.contenders()
action_in_progress = '<unknown action>'
if len(contenders) > 0 and contenders[0]:
_, _, action_in_progress = contenders[0].partition(' ')
msg = ("Security resource modifications or commit/discard action "
"on %s '%s' (%s) scope is under progress. Try again later."
% (scope_type.replace('_', ' ').title(),
':'.join(scope_fq_name), scope_uuid))
raise cfgm_common.exceptions.HttpError(400, msg)
# TODO(ethuleau): we could return some stats or type/uuid resources
# actions which were done during commit or discard?
return {}
    def _security_commit_resources(self, scope_type, parent_type,
                                   parent_fq_name, parent_uuid, pm):
        """Apply every pending (draft) security resource owned by the draft
        policy-management object *pm* onto the committed parent scope.

        For each draft child: re-home its FQ name under *parent_fq_name*,
        delete the draft copy, then — depending on draft_mode_state —
        queue a delete of the original, an update of the original, or a
        create of a brand-new resource. Creates/updates are applied
        leaf-first, deletes afterwards, and temporarily detached backrefs
        are restored last.
        """
        updates = []
        deletes = []
        held_refs = []
        for type_name in SECURITY_OBJECT_TYPES:
            r_class = self.get_resource_class(type_name)
            for child in pm.get('%ss' % r_class.object_type, []):
                ok, result = r_class.locate(child['to'], child['uuid'],
                                            create_it=False)
                if not ok:
                    continue
                draft = result
                fq_name = parent_fq_name + [child['to'][-1]]
                try:
                    # NOTE(review): local 'uuid' shadows the uuid module
                    # within this method.
                    uuid = self._db_conn.fq_name_to_uuid(r_class.object_type,
                                                         fq_name)
                except NoIdError:
                    # No original version found, new resource created
                    uuid = None
                self._holding_backrefs(held_refs, scope_type,
                                       r_class.object_type, fq_name, draft)
                # Purge pending resource as we re-use the same UUID
                self.internal_request_delete(r_class.object_type,
                                             child['uuid'])
                if uuid and draft['draft_mode_state'] == 'deleted':
                    # The resource is removed, we can purge original resource
                    deletes.append((r_class.object_type, uuid))
                elif uuid and draft['draft_mode_state'] == 'updated':
                    # Update original resource with pending resource
                    draft.pop('fq_name', None)
                    draft.pop('uuid', None)
                    draft.pop('draft_mode_state', None)
                    if 'id_perms' in draft:
                        draft['id_perms'].pop('uuid', None)
                    draft['parent_type'] = parent_type
                    draft['parent_uuid'] = parent_uuid
                    # if a ref type was purged while the draft mode was
                    # enabled, set the ref to an empty list to ensure all
                    # refs will be removed when the resource is
                    # updated/committed
                    for ref_type in r_class.ref_fields:
                        if ref_type not in draft:
                            draft[ref_type] = []
                    self._update_fq_name_security_refs(
                        parent_fq_name, pm['fq_name'], type_name, draft)
                    updates.append(('update', (r_class.resource_type, uuid,
                                               copy.deepcopy(draft))))
                elif not uuid and draft['draft_mode_state'] == 'created':
                    # Create new resource with pending values (re-use UUID)
                    draft.pop('id_perms', None)
                    draft.pop('perms2', None)
                    draft.pop('draft_mode_state', None)
                    draft['fq_name'] = fq_name
                    draft['parent_type'] = parent_type
                    draft['parent_uuid'] = parent_uuid
                    self._update_fq_name_security_refs(
                        parent_fq_name, pm['fq_name'], type_name, draft)
                    updates.append(('create', (r_class.resource_type,
                                               copy.deepcopy(draft))))
                else:
                    msg = (
                        "Try to commit a security resource %s (%s) with "
                        "invalid state '%s'. Ignore it." %
                        (':'.join(draft.get('fq_name', ['FQ name unknown'])),
                         draft.get('uuid', 'UUID unknown'),
                         draft.get('draft_mode_state', 'No draft mode state'))
                    )
                    self.config_log(msg, level=SandeshLevel.SYS_WARN)

        # Need to create/update leaf resources first as they could be
        # referenced by another create/updated resource (e.g.: FP -> FP)
        updates.reverse()  # order is: AG, SG, FR, FP and APS
        for action, args in updates:
            getattr(self, 'internal_request_%s' % action)(*args)

        # Postpone delete to be sure deleted resource not anymore
        # referenced and delete resource with ref before resource with backref
        for args in deletes:  # order is: APS, FP, FR, SG and AG
            self.internal_request_delete(*args)

        # Restore the backrefs that _holding_backrefs detached.
        for args, kwargs in held_refs:
            self.internal_request_ref_update(*args, **kwargs)
@staticmethod
def _update_fq_name_security_refs(parent_fq_name, pm_fq_name, res_type,
draft):
for ref_type in SECURITY_OBJECT_TYPES:
for ref in draft.get('%s_refs' % ref_type, []):
if ref['to'][:-1] == pm_fq_name:
ref['to'] = parent_fq_name + [ref['to'][-1]]
if res_type == 'firewall_rule':
for ep in [draft.get('endpoint_1', {}),
draft.get('endpoint_2', {})]:
ag_fq_name = ep.get('address_group', [])
if ag_fq_name and ag_fq_name.split(':')[:-1] == pm_fq_name:
ep['address_group'] = ':'.join(parent_fq_name + [
ag_fq_name.split(':')[-1]])
    def _holding_backrefs(self, held_refs, scope_type, obj_type, fq_name,
                          obj_dict):
        """Temporarily detach non-global security backrefs of a draft
        resource so it can be deleted and re-created under its committed
        FQ name.

        For each detached backref, appends the (args, kwargs) needed to
        re-attach it later (via internal_request_ref_update) to
        *held_refs*, and removes it from *obj_dict*. Only applies in the
        global scope and when the draft is not being deleted.
        """
        backref_fields = {'%s_back_refs' % t for t in SECURITY_OBJECT_TYPES}
        if (scope_type == GlobalSystemConfig().object_type and
                obj_dict['draft_mode_state'] != 'deleted'):
            for backref_field in set(obj_dict.keys()) & backref_fields:
                # Strip the '_back_refs' suffix (10 chars) to get the type.
                backref_type = backref_field[:-10]
                for backref in copy.deepcopy(obj_dict.get(backref_field, [])):
                    # if it's a backref to a global resource, leave it alone
                    if backref['to'][0] in [PolicyManagement().name,
                            POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT]:
                        continue
                    # Detach now ...
                    self.internal_request_ref_update(
                        backref_type,
                        backref['uuid'],
                        'DELETE',
                        obj_type,
                        ref_uuid=obj_dict['uuid'],
                    )
                    # ... and remember how to re-attach it (by the new
                    # committed fq_name) once the commit is done.
                    held_refs.append(
                        ((backref_type, backref['uuid'], 'ADD', obj_type),
                         {
                             'ref_fq_name': fq_name,
                             'attr': backref.get('attr')
                         }
                        )
                    )
                    obj_dict[backref_field].remove(backref)
def _security_discard_resources(self, pm):
for type_name in SECURITY_OBJECT_TYPES:
r_class = self.get_resource_class(type_name)
for child in pm.get('%ss' % r_class.object_type, []):
self.internal_request_delete(r_class.object_type,
child['uuid'])
def main(args_str=None, server=None):
    """Run the API server's bottle event loop until interrupted.

    *server* is an already-constructed VncApiServer; *args_str* is kept
    for signature compatibility with callers.
    """
    vnc_api_server = server

    wsgi_app = vnc_api_server.get_pipe_start_app()
    listen_ip = vnc_api_server.get_listen_ip()
    listen_port = vnc_api_server.get_server_port()

    # @sigchld: SIG_CHLD handling stays disabled for now — every keystone
    # token-validation request sends SIG_CHLD to the API server.
    #hub.signal(signal.SIGCHLD, vnc_api_server.sigchld_handler)
    hub.signal(signal.SIGTERM, vnc_api_server.sigterm_handler)
    hub.signal(signal.SIGHUP, vnc_api_server.sighup_handler)

    if wsgi_app is None:
        wsgi_app = vnc_api_server.api_bottle
    try:
        bottle.run(app=wsgi_app, host=listen_ip, port=listen_port,
                   server=get_bottle_server(server._args.max_requests))
    except KeyboardInterrupt:
        # quietly handle Ctrl-C
        pass
    finally:
        # always cleanup gracefully
        vnc_api_server.reset()
# end main
def server_main(args_str=None):
    """Enable text-format cgitb tracebacks, build the API server from
    *args_str* and run it until interrupted."""
    vnc_cgitb.enable(format='text')

    main(args_str, VncApiServer(args_str))
#server_main
if __name__ == "__main__":
    # Run the API server when this module is executed as a script.
    server_main()
|
en
| 0.675173
|
# # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # This is the main module in vnc_cfg_api_server package. It manages interaction between http/rest, address management, authentication and database interfaces. # from neutron plugin to api server, the request URL could be large. # fix the const # import GreenletProfiler Following is needed to silence warnings on every request when keystone auth_token middleware + Sandesh is used. Keystone or Sandesh alone do not produce these warnings. Exception AttributeError: AttributeError( "'_DummyThread' object has no attribute '_Thread__block'",) in <module 'threading' from '/usr/lib64/python2.7/threading.pyc'> ignored See http://stackoverflow.com/questions/13193278/understand-python-threading-bug for more information. # from gen_py.vnc_api.ttypes import * # end error_400 # end error_403 # end error_404 # end error_405 # end error_409 # end error_412 # end error_500 # end error_503 This is the manager class co-ordinating all classes present in the package # end __new__ # end _validate_complex_type Do not allow configuration of AAP with IPv4 prefix length less than 24 and 120 for IPv6. 
LP #1720118 # end _validate_allowed_address_pair_prefix_len # check if the job template id is a valid uuid # check if the job template fqname is a valid fq_name # check if the device id passed is a valid uuid #get the child process id that called the signal handler # update job manager execution status uve # read the job object log for a particular job to check if # it succeeded or not #send uve irrespective of the job log query # success/failure with job status # read the last PRouter state for all Prouetrs #remove the pid entry of the processed job_mgr process Payload of execute_job job_template_id (Mandatory if no job_template_fq_name): <uuid> of the created job_template job_template_fq_name (Mandatory if no job_template_id): fqname in the format: ["<global-system-config-name>", "<name of the job-template>"] input (Type json): Input Schema of the playbook under the job_template_id params (Type json): Extra_params for the job_manager (Eg. device_list) E.g. Payload: { "job_template_id": "<uuid>", "params": { "device_list": ["<device_uuid1>", "<device_uuid2>", .... 
"<device_uuidn>"] } } # TODO - pass the job manager config file from api server config # read the device object and pass the necessary data to the job # generate the job execution id # get the auth token # pass the required config args to job manager # create job manager fabric execution status uve # handle process exit signal # create job manager subprocess # get the fabric fq_name from the database if fabric_uuid is provided # If value is not an integer, then try to convert it to integer # end _validate_simple_type # end _check_mandatory_props_list # complex-type + value isn't dict or wrapped in list or map # end for all properties # end _validate_props_in_request # end _validate_refs_in_request # end _validate_perms_in_request # end _validate_resource_type # If not connected to zookeeper do not allow operations that # causes the state change # If there are too many pending updates to rabbit, do not allow # operations that cause state change # end _ensure_services_conn # end undo # http_resource_<oper> - handlers invoked from # a. bottle route (on-the-wire) OR # b. internal requests # using normalized get_request() from ApiContext # check visibility # lack of registered extension leads to RuntimeError # properties validator # references validator # Can abort resource creation and retrun 202 status code # Creation accepted but not applied, pending delete return 202 HTTP # OK code to aware clients # type-specific hook # common handling for all resource create # if client gave parent_type of config-root, ignore and remove # non config-root child, verify parent exists # Validate perms on references # State modification starts from here. Ensure that cleanup is done for all state changes # Alloc and Store id-mappings before creating entry on pubsub store. 
# Else a subscriber can ask for an id mapping before we have stored it # type-specific hook # Init quota counter # To be used for reverting back count when undo() is called #normal execution # type-specific hook # Create is done, log to system, no point in informing user # end stateful_create # Initialize quota counter if resource is project # non config-root child, send back parent uuid/href # lack of registered extension leads to RuntimeError # end http_resource_create # common handling for all resource get # Not present in DB # send Not-Modified, caches use this for read optimization # end if etag # Generate field list for db layer # default props + children + refs + backrefs # Not present in DB # check visibility # end http_resource_read # filter object references based on permissions # build new links in returned dict based on permissions on linked object # end obj_view # Early return if there is no body or an empty body # Look if the resource have a pending version, if yes use it as resource # to update # end http_resource_update # if obj doesn't exist return early # lack of registered extension leads to RuntimeError # read in obj from db (accepting error) to get details of it # proceed down to delete the resource # check visibility # common handling for all resource delete # Permit abort resource deletion and retrun 202 status code # Found back reference to existing enforced or draft resource # Deletion accepted but not applied, pending delete # return 202 HTTP OK code to aware clients # fail if non-default children or non-derived backrefs exist # State modification starts from here. 
Ensure that cleanup is done for all state changes # Delete default children first # quota counter obj not initialized # in this api-server, Init counter # quota limit is modified to unlimited # delete counter object # type-specific hook # Delete is done, log to system, no point in informing user # end stateful_delete # lack of registered extension leads to RuntimeError # end http_resource_delete # common handling for all resource get # end http_resource_list # internal_request_<oper> - handlers of internally generated requests # that save-ctx, generate-ctx and restore-ctx # end internal_request_create # end internal_request_update # end internal_request_delete # end internal_request_ref_update # Create a default child only if provisioned for # For virtual networks, allocate an ID # DB Create failed, log and stop further child creation. # recurse down type hierarchy # end create_default_children # Delete a default child only if provisioned for # first locate default child then delete it") # end delete_default_children # end _generate_resource_crud_methods # CRUD + list URIs of the form # obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get) # obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put) # obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete) # obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post) # obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get) # leaf resource # collection of leaf # end _generate_resource_crud_uri # set the max size of the api requests # multi_tenancy is ignored if aaa_mode is configured by user # MT configured by user - determine from aaa-mode # set python logging level from logging_level cmdline arg # Generate LinkObjects for all entities # Link for root #GreenletProfiler.set_clock_type('wall') # Register for VN delete request. 
Disallow delete of system default VN # APIs to reserve/free block of IP address from a VN/Subnet # APIs to find out number of ip instances from given VN subnet # Enable/Disable aaa mode # Set Tag actions # Commit or discard draft security policy # randomize the collector list # sandesh init # Reset the sandesh send rate limit value # Address Management interface # DB interface initialization # ZK quota counter initialization # status # API/Permissions check # after db init (uses db_conn) # Load extensions # Authn/z interface # lack of registered extension leads to RuntimeError # following allowed without authentication # allow all documentation # allow discovery # map of running job instances. Key is the pid and value is job # instance info # end __init__ # end _extensions_transform_request # end _extensions_validate_request # end _extensions_transform_response # Also log keystone response time against this request id, # before returning the trace message. # end _generate_rest_api_request_trace # end _generate_rest_api_response_trace # Public Methods # don't log details of cfgm_common.exceptions.HttpError i.e handled error cases # end route # end get_args # end get_server_ip # end get_listen_ip # end get_server_port # end get_worker_id # end get_pipe_start_app # end get_rabbit_health_check_interval # end get_auth_headers_from_token # Check for the system created VN. 
Disallow such VN delete # if obj doesn't exist return early # end # strip trailing '/' in url # strip trailing '/' in url # end homepage_http_get # ubuntu packaged path # centos packaged path # end documentation_http_get # roles in result['token_info']['access']['user']['roles'] # Handle v2 and v3 responses # end obj_perms_http_get # change ownership of an object # Not present in DB # ensure user has RW permissions to object #end obj_chown_http_post # chmod for an object # Not present in DB # ensure user has RW permissions to object item['tenant'] := [<share_type>:] <uuid> share_type := ['domain' | 'tenant'] # end obj_chmod_http_post # request validations over # common handling for all resource get # Not present in DB # check visibility # Prepare response # end prop_collection_http_get # validate each requested operation # Get actual resource from DB # Look if the resource have a pending version, if yes use it as resource # to update # end prop_collection_http_post # grab fields # validate fields # if UUID provided verify existence of the reference being added # To invoke type specific hook and extension manager # Look if the resource have a pending version, if yes use it as resource # to update # end ref_update_http_post # grab fields # validate fields # end ref_relax_for_delete_http_post # ensure user has access to this id # end fq_name_to_id_http_post # ensure user has access to this id # end id_to_fq_name_http_post # Enables a user-agent to store and retrieve key-val pair # TODO this should be done only for special/quantum plugin # TODO move values to common # end useragent_kv_http_post Check database for inconsistencies. 
No update to database # end db_check Retrieve and return all records # end fetch_records #GreenletProfiler.start() # end start_profile #GreenletProfiler.stop() #stats = GreenletProfiler.get_func_stats() #self._profile_info = stats.print_all() #return self._profile_info # end stop_profile # end get_profile_info # Create Placeholder classes derived from Resource, <Type> so # resource_class methods can be invoked in CRUD methods without # checking for None # end get_resource_class List collection when requested ids don't fit in query params. # e.g. virtual-network # end list_bulk_collection_http_post # Private Methods Eg. python vnc_cfg_api_server.py --cassandra_server_list 10.1.2.3:9160 10.1.2.4:9160 --redis_server_ip 127.0.0.1 --redis_server_port 6382 --collectors 127.0.0.1:8086 --http_server_port 8090 --listen_ip_addr 127.0.0.1 --listen_port 8082 --admin_port 8095 --region_name RegionOne --log_local --log_level SYS_DEBUG --logging_level DEBUG --logging_conf <logger-conf-file> --log_category test --log_file <stdout> --trace_file /var/log/contrail/vnc_openstack.err --use_syslog --syslog_facility LOG_USER --worker_id 1 --rabbit_max_pending_updates 4096 --rabbit_health_check_interval 120.0 --cluster_id <testbed-name> [--auth keystone] [--default_encoding ascii ] --object_cache_size 10000 --object_cache_exclude_types '' --max_request_size 1024000 # end _parse_args # sigchld handler is currently not engaged. See comment @sigchld # DB interface initialization # end sigchld_handler # sighup handler for applying new configs # Reconnect to achieve load-balance irrespective of list # end sighup_handler # end _load_extensions #TODO refacter db connection management. # end _db_connect Called at resource creation to ensure that id_perms is present in obj # retrieve object and permissions # Resource creation # retrieve the previous version of the id_perms # from the database and update the id_perms with # them. 
# not all fields can be updated # Start from default and update from obj_dict # TODO handle perms present in req_id_perms # end _ensure_id_perms_present # end _get_default_id_perms Called at resource creation to ensure that id_perms is present in obj # retrieve object and permissions # set ownership of object to creator tenant # Resource creation # Resource already exists # retrieve the previous version of the perms2 # from the database and update the perms2 with # them. # Start from default and update from obj_dict # TODO handle perms2 present in req_perms2 # ensure is_shared and global_access are consistent # NOTE(ethuleau): ignore exception for the moment as it breaks the # Neutron use case where external network have global access but # is property 'is_shared' is False https://review.opencontrail.org/#/q/Id6a0c1a509d7663da8e5bc86f2c7c91c73d420a2 # Before patch https://review.opencontrail.org/#q,I9f53c0f21983bf191b4c51318745eb348d48dd86,n,z # error was also ignored as all retruned errors of that method were # not took in account # raise cfgm_common.exceptions.HttpError(400, msg) # end _get_default_perms2 # create singleton defaults if they don't exist already in db # Global and default policy resources # specifying alarm kwargs like contrail_alarm.py # Create singleton SG __no_rule__ object for openstack # Create pre-defined tag-type #Load init data for job playbooks like JobTemplates, Tags, etc # make default ipam available across tenants for backward compatability # end _db_init_entries # Load init data for job playbooks like JobTemplates, Tags, etc This function loads init data from a data file specified by the argument '--fabric_ansible_dir' to the database. The data file must be in JSON format and follow the format below: { "data": [ { "object_type": "<vnc object type name>", "objects": [ { <vnc object payload> }, ... ] }, ... 
] } Here is an example: { "data": [ { "object_type": "tag", "objects": [ { "fq_name": [ "fabric=management_ip" ], "name": "fabric=management_ip", "tag_type_name": "fabric", "tag_value": "management_ip" } ] } ] } # Get the class name from object type # Get the class object # saving the objects to the database # update default-global-system-config for supported_device_families # end Load init data # Load json data from fabric_ansible_playbooks/conf directory # open the json file # Loop through the json # end load json data # generate default rbac group rule # allow full access to cloud admin # ensure global list is not missing any default rules (bug 1642464) # end _create_default_rbac_rule # end _resync_domains_projects # TODO remove backward compat create mapping in zk # for singleton START # doesn't exist in zookeeper but does so in cassandra, # migrate this info to zookeeper # doesn't exist in cassandra as well as zookeeper, proceed normal # TODO backward compat END # create if it doesn't exist yet # For virtual networks, allocate an ID # end create_singleton_entry # allocate tag id for tag object # Allocate ID for tag value. Use the all fq_name to distinguish same # tag values between global and scoped # Compose Tag ID with the type ID and value ID # end allocate tag id # query params always appears as string # end _validate_page_marker # end _validate_page_limit # if marker is None, start scanning from uuid 0 # cookie to start next search # remainder count to finish page # If only counting, return early # It is possible that the object was deleted, but received # an update after that. We need to ignore it for now. 
In # future, we should clean up such stale objects # skip items not authorized # end obj_result in result # end not admin req # pagination request and done # pagination request and partially filled # end while not page_filled # send next marker along with results # end _list_collection # end get_db_connection # end generate_url # return a copy of obj_dict with href keys for: # self, parent, children, refs, backrefs # don't update obj_dict as it may be cached object # No parent # child_field doesn't exist in original # end for all child fields # ref_field doesn't exist in original # end for all ref fields # backref_field doesn't exist in original # end for all backref fields # end generate_hrefs # end config_object_error # end config_log # end _set_api_audit_info # uuid is parent's for collections # TODO check api + resource perms etc. # end _get_common # ZK and rabbitmq should be functional # lack of registered extension leads to RuntimeError # check visibility # properties validator (for collections validation in caller) # references validator # common handling for all resource put # Ensure object has at least default permissions set # Validate perms on references # State modification starts from here. Ensure that cleanup is done for all state changes # Permit abort resource update and retrun 202 status code # Modifications accepted but not applied, pending update # returns 202 HTTP OK code to aware clients # type-specific hook # read ref_update args # Update quota counter # type-specific hook # end stateful_update # Revert changes made to quota counter by using DB quota dict # lack of registered extension leads to RuntimeError # end _put_common # parent_type needed for perms check. None for derived objects (eg. # routing-instance) # If not connected to zookeeper do not allow operations that # causes the state change # If there are too many pending updates to rabbit, do not allow # operations that cause state change # TODO check api + resource perms etc. 
Validate parent allows write access. Implicitly trust parent info in the object since coming from our DB. # end _http_delete_common # well-formed name checks # end _post_validate # end validate_parent_type # TODO check api + resource perms etc. # Fail if object exists already # Ensure object has at least default permissions set # TODO check api + resource perms etc. # Set the display name # end _post_common # cleanup internal state/in-flight operations # end reset # allocate block of IP addresses from VN. Subnet info expected in request # body # expected format {"subnet_list" : "192.168.127.12/24", "count" : 4} # end vn_ip_alloc_http_post # free block of ip addresses to subnet { "subnet" : "192.168.127.12/24", "ip_addr": [ "192.168.3.11", "192.168.3.11", "192.168.127.12", "172.16.31.10" ] } # end vn_ip_free_http_post # return no. of IP addresses from VN/Subnet # expected format {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"] # end vn_subnet_ip_count_http_post # check if token validatation needed # indication if multi tenancy with rbac is enabled or disabled # end # unless global, inherit project id from caller # lookup (validate) tag # If the body of a Tag type is None, all references to that Tag # type are remove on the resource # Else get defined values and update Tag references on the resource # Tag type is unique per object, unless # TAG_TYPE_NOT_UNIQUE_PER_OBJECT type # address-group object can only be associated with label # don't need to update if tag type with same value already # referenced # object already have a reference to that tag type with a # different value, remove it # finally, reference the tag type with the new value # Add 'value' attribut to 'add_values' list if not null # Draft dedicated policy management does not exists, the draft mode # is not enabled on the scope # TODO(ethuleau): we could return some stats or type/uuid resources # actions which were done during commit or discard? 
# No original version found, new resource created # Purge pending resource as we re-use the same UUID # The resource is removed, we can purge original resource # Update orginal resource with pending resource # if a ref type was purge when the draft mode is enabled, # set the ref to an empty list to ensure all refs will be # removed when resource will be updated/committed # Create new resource with pending values (re-use UUID) # Need to create/update leaf resources first as they could be # referenced by another create/updated resource (e.g.: FP -> FP) # order is: AG, SG, FR, FP and APS # Postpone delete to be sure deleted resource not anymore # referenced and delete resource with ref before resource with backref # order is: APS, FP, FR, SG and AG # if it's a backref to global resource let it @sigchld Disable handling of SIG_CHLD for now as every keystone request to validate token sends SIG_CHLD signal to API server. #hub.signal(signal.SIGCHLD, vnc_api_server.sigchld_handler) # quietly handle Ctrl-C # always cleanup gracefully # end main #server_main
| 1.43485
| 1
|
sanansaattaja/website/forms/message_form.py
|
KEZKA/YL-WEB-PROJECT
| 3
|
6625893
|
<reponame>KEZKA/YL-WEB-PROJECT
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired
class MessageForm(FlaskForm):
    """Form for composing a private message: addressee, body text, submit.

    Both fields are required (DataRequired validators).
    """
    addressee = StringField('Whom', validators=[DataRequired()])
    text = TextAreaField("Text", validators=[DataRequired()])
    submit = SubmitField('Send')
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired
class MessageForm(FlaskForm):
    """Form for composing a private message: addressee, body text, submit.

    Both fields are required (DataRequired validators).
    """
    addressee = StringField('Whom', validators=[DataRequired()])
    text = TextAreaField("Text", validators=[DataRequired()])
    submit = SubmitField('Send')
|
none
| 1
| 2.556902
| 3
|
|
data.py
|
LiuXinyu12378/MatchingNetworks
| 1
|
6625894
|
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import tqdm
import concurrent.futures
import pickle
from skimage import transform
def augment_image(image, k, channels):
    """Rotate *image* by *k* degrees around its center.

    Single-channel images (channels == 1) are squeezed to 2-D before the
    rotation and the channel axis is restored afterwards, since
    skimage.transform.rotate works on HxW arrays in that case.
    """
    single_channel = channels == 1
    if single_channel:
        image = image[:, :, 0]
    image = transform.rotate(image, angle=k, resize=False, center=None,
                             order=1, mode='constant', cval=0, clip=True,
                             preserve_range=False)
    if single_channel:
        image = np.expand_dims(image, axis=2)
    return image
class MatchingNetworkDatasetParallel(Dataset):
    def __init__(self, batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
                 train_val_test_split, num_classes_per_set, num_samples_per_class,
                 data_path, dataset_name, indexes_of_folders_indicating_class,
                 seed=100, reset_stored_filepaths=False, labels_as_int=False):
        """Build an episodic few-shot dataset over an image folder tree.

        :param batch_size: batch size for the data loader
        :param reverse_channels: whether to reverse colour channels (e.g. RGB to BGR)
        :param num_of_gpus: number of gpus used for training
        :param image_height: target image height after resize
        :param image_width: target image width after resize
        :param image_channels: number of colour channels to keep
        :param train_val_test_split: three fractions summing to 1 that split
            the class labels (not samples) into train/val/test sets
        :param num_classes_per_set: N in the N-way episode
        :param num_samples_per_class: K in the K-shot episode
        :param data_path: root directory scanned for images
        :param dataset_name: used to name the cached path-index pickle files
        :param indexes_of_folders_indicating_class: path-component indexes that
            together form a sample's class label
        :param seed: base RNG seed per split
        :param reset_stored_filepaths: if True, discard the cached path index
            and rescan the data directory
        :param labels_as_int: interpret labels as integers instead of strings
        """
        self.data_path = data_path
        self.dataset_name = dataset_name
        self.indexes_of_folders_indicating_class = indexes_of_folders_indicating_class
        self.labels_as_int = labels_as_int
        self.train_val_test_split = train_val_test_split
        self.current_dataset_name = "train"
        self.reset_stored_filepaths = reset_stored_filepaths
        # Builds (or loads) the path index from disk and splits it by class.
        self.x_train, self.x_val, self.x_test = self.load_dataset()
        self.num_of_gpus = num_of_gpus
        self.batch_size = batch_size
        self.reverse_channels = reverse_channels
        self.image_height, self.image_width, self.image_channel = image_height, image_width, image_channels
        self.train_index = 0
        self.val_index = 0
        self.test_index = 0
        # Per-split seeds: init_seed keeps the originals, seed is advanced.
        self.init_seed = {"train": seed, "val": seed, "test": seed}
        self.seed = {"train": seed, "val": seed, "test": seed}
        self.augment_images = False
        self.num_samples_per_class = num_samples_per_class
        self.num_classes_per_set = num_classes_per_set
        self.indexes = {"train": 0, "val": 0, "test": 0}
        self.datasets = {"train": self.x_train,
                         "val": self.x_val,
                         "test": self.x_test}
        # split -> {class_key: number of samples available for that class}
        self.dataset_size_dict = {"train": {key: len(self.x_train[key]) for key in list(self.x_train.keys())},
                                  "val": {key: len(self.x_val[key]) for key in list(self.x_val.keys())},
                                  "test": {key: len(self.x_test[key]) for key in list(self.x_test.keys())}}
        self.label_set = self.get_label_set()
        # split -> total number of samples across all its classes
        self.data_length = {name: np.sum([len(self.datasets[name][key])
                                          for key in self.datasets[name]]) for name in self.datasets.keys()}
        print("data", self.data_length)
        #print(self.datasets)
def load_dataset(self):
data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()
total_label_types = len(data_image_paths)
print(total_label_types)
# data_image_paths = self.shuffle(data_image_paths)
x_train_id, x_val_id, x_test_id = int(self.train_val_test_split[0] * total_label_types), \
int(np.sum(self.train_val_test_split[:2]) * total_label_types), \
int(total_label_types)
print(x_train_id, x_val_id, x_test_id)
x_train_classes = (class_key for class_key in list(data_image_paths.keys())[:x_train_id])
x_val_classes = (class_key for class_key in list(data_image_paths.keys())[x_train_id:x_val_id])
x_test_classes = (class_key for class_key in list(data_image_paths.keys())[x_val_id:x_test_id])
x_train, x_val, x_test = {class_key: data_image_paths[class_key] for class_key in x_train_classes}, \
{class_key: data_image_paths[class_key] for class_key in x_val_classes}, \
{class_key: data_image_paths[class_key] for class_key in x_test_classes},
return x_train, x_val, x_test
def load_datapaths(self):
data_path_file = "datasets/{}.pkl".format(self.dataset_name)
self.index_to_label_name_dict_file = "datasets/map_to_label_name_{}.pkl".format(self.dataset_name)
self.label_name_to_map_dict_file = "datasets/label_name_to_map_{}.pkl".format(self.dataset_name)
if self.reset_stored_filepaths == True:
if os.path.exists(data_path_file):
os.remove(data_path_file)
self.reset_stored_filepaths=False
try:
data_image_paths = self.load_dict(data_path_file)
label_to_index = self.load_dict(name=self.label_name_to_map_dict_file)
index_to_label_name_dict_file = self.load_dict(name=self.index_to_label_name_dict_file)
return data_image_paths, index_to_label_name_dict_file, label_to_index
except:
print("Mapped data paths can't be found, remapping paths..")
data_image_paths, code_to_label_name, label_name_to_code = self.get_data_paths()
self.save_dict(data_image_paths, name=data_path_file)
self.save_dict(code_to_label_name, name=self.index_to_label_name_dict_file)
self.save_dict(label_name_to_code, name=self.label_name_to_map_dict_file)
return self.load_datapaths()
def save_dict(self, obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_dict(self, name):
with open(name, 'rb') as f:
return pickle.load(f)
def load_test_image(self, filepath):
try:
image = cv2.imread(filepath)
image = cv2.resize(image, dsize=(28, 28))
except RuntimeWarning:
os.system("convert {} -strip {}".format(filepath, filepath))
print("converting")
image = cv2.imread(filepath)
image = cv2.resize(image, dsize=(28, 28))
except:
print("Broken image")
os.remove(filepath)
if image is not None:
return filepath
else:
os.remove(filepath)
return None
def get_data_paths(self):
print("Get images from", self.data_path)
data_image_path_list_raw = []
labels = set()
for subdir, dir, files in os.walk(self.data_path):
for file in files:
if (".jpeg") in file.lower() or (".png") in file.lower() or (".jpg") in file.lower():
filepath = os.path.join(subdir, file)
label = self.get_label_from_path(filepath)
data_image_path_list_raw.append(filepath)
labels.add(label)
labels = sorted(labels)
idx_to_label_name = {idx: label for idx, label in enumerate(labels)}
label_name_to_idx = {label: idx for idx, label in enumerate(labels)}
data_image_path_dict = {idx: [] for idx in list(idx_to_label_name.keys())}
# with tqdm.tqdm(total=len(data_image_path_list_raw)) as pbar_error:
# with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
# # Process the list of files, but split the work across the process pool to use all CPUs!
# for image_file in executor.map(self.load_test_image, (data_image_path_list_raw)):
# pbar_error.update(1)
# if image_file is not None:
# label = self.get_label_from_path(image_file)
# data_image_path_dict[label_name_to_idx[label]].append(image_file)
bar = tqdm.tqdm(data_image_path_list_raw,total=len(data_image_path_list_raw))
for image_file in bar:
if image_file is not None:
label = self.get_label_from_path(image_file)
data_image_path_dict[label_name_to_idx[label]].append(image_file)
return data_image_path_dict, idx_to_label_name, label_name_to_idx
def get_label_set(self):
index_to_label_name_dict_file = self.load_dict(name=self.index_to_label_name_dict_file)
return set(list(index_to_label_name_dict_file.keys()))
def get_index_from_label(self, label):
label_to_index = self.load_dict(name=self.label_name_to_map_dict_file)
return label_to_index[label]
def get_label_from_index(self, index):
index_to_label_name = self.load_dict(name=self.index_to_label_name_dict_file)
return index_to_label_name[index]
def get_label_from_path(self, filepath):
label_bits = filepath.split("\\")
label = "_".join([label_bits[idx] for idx in self.indexes_of_folders_indicating_class])
if self.labels_as_int:
label = int(label)
return label
def load_image(self, image_path, channels):
image = cv2.imread(image_path)[:, :, :channels]
image = cv2.resize(image, dsize=(self.image_height, self.image_width))
if channels==1:
image = np.expand_dims(image, axis=2)
return image
def load_batch(self, batch_image_paths):
image_batch = []
image_paths = []
for image_path in batch_image_paths:
image_paths.append(image_path)
for image_path in image_paths:
image = self.load_image(image_path=image_path, channels=self.image_channel)
image_batch.append(image)
image_batch = np.array(image_batch, dtype=np.float32)
image_batch = self.preprocess_data(image_batch)
return image_batch
def preprocess_data(self, x):
"""
Preprocesses data such that their values lie in the -1.0 to 1.0 range so that the tanh activation gen output
can work properly
:param x: A data batch to preprocess
:return: A preprocessed data batch
"""
x = x / 255.0
x = 2 * x - 1
x_shape = x.shape
x = np.reshape(x, (-1, x_shape[-3], x_shape[-2], x_shape[-1]))
if self.reverse_channels is True:
reverse_photos = np.ones(shape=x.shape)
for channel in range(x.shape[-1]):
reverse_photos[:, :, :, x.shape[-1] - 1 - channel] = x[:, :, :, channel]
x = reverse_photos
x = x.reshape(x_shape)
# print(x.mean(), x.min(), x.max())
return x
def reconstruct_original(self, x):
"""
Applies the reverse operations that preprocess_data() applies such that the data returns to their original form
:param x: A batch of data to reconstruct
:return: A reconstructed batch of data
"""
x = (x + 1) / 2
x = x * 255.0
return x
def shuffle(self, x):
"""
Shuffles the data batch along it's first axis
:param x: A data batch
:return: A shuffled data batch
"""
indices = np.arange(len(x))
np.random.shuffle(indices)
x = x[indices]
return x
def get_set(self, dataset_name, seed, augment_images=False):
"""
Generates a data batch to be used for training or evaluation
:param set_name: The name of the set to use, e.g. "train", "val" etc
:return: A data batch
"""
rng = np.random.RandomState(seed)
selected_classes = rng.choice(list(self.dataset_size_dict[dataset_name].keys()),
size=self.num_classes_per_set, replace=False)
target_class = rng.choice(selected_classes, size=1, replace=False)[0]
k_list = rng.randint(0, 3, size=self.num_classes_per_set)
k_dict = {selected_class: k_item for (selected_class, k_item) in zip(selected_classes, k_list)}
episode_labels = [i for i in range(self.num_classes_per_set)]
class_to_episode_label = {selected_class: episode_label for (selected_class, episode_label) in
zip(selected_classes, episode_labels)}
support_set_images = []
support_set_labels = []
for class_entry in selected_classes:
choose_samples_list = rng.choice(self.dataset_size_dict[dataset_name][class_entry],
size=self.num_samples_per_class, replace=True)
class_image_samples = []
class_labels = []
for sample in choose_samples_list:
choose_samples = self.datasets[dataset_name][class_entry][sample]
x_class_data = self.load_batch([choose_samples])[0]
if augment_images is True:
k = k_dict[class_entry]
x_class_data = augment_image(image=x_class_data, k=k*90, channels=self.image_channel)
class_image_samples.append(x_class_data)
class_labels.append(int(class_to_episode_label[class_entry]))
support_set_images.append(class_image_samples)
support_set_labels.append(class_labels)
support_set_images = np.array(support_set_images, dtype=np.float32)
support_set_labels = np.array(support_set_labels, dtype=np.int32)
target_sample = rng.choice(self.dataset_size_dict[dataset_name][target_class], size=1,
replace=True)[0]
choose_samples = self.datasets[dataset_name][target_class][target_sample]
target_set_image = self.load_batch([choose_samples])[0]
if augment_images is True:
k = k_dict[target_class]
target_set_image = augment_image(image=target_set_image, k=k * 90, channels=self.image_channel)
target_set_label = int(class_to_episode_label[target_class])
return support_set_images, target_set_image, support_set_labels, target_set_label
def __len__(self):
total_samples = self.data_length[self.current_dataset_name]
return total_samples
def length(self, dataset_name):
self.switch_set(dataset_name=dataset_name)
return len(self)
def set_augmentation(self, augment_images):
self.augment_images = augment_images
def switch_set(self, dataset_name, seed=100):
self.current_dataset_name = dataset_name
if dataset_name=="train":
self.update_seed(dataset_name=dataset_name, seed=seed)
def update_seed(self, dataset_name, seed=100):
self.init_seed[dataset_name] = seed
def __getitem__(self, idx):
support_set_images, target_set_image, support_set_labels, target_set_label = \
self.get_set(self.current_dataset_name, seed=self.init_seed[self.current_dataset_name] + idx, augment_images=self.augment_images)
data_point = {"support_set_images": support_set_images, "target_set_image": target_set_image,
"support_set_labels": support_set_labels, "target_set_label": target_set_label}
self.seed[self.current_dataset_name] = self.seed[self.current_dataset_name] + 1
return data_point
def reset_seed(self):
self.seed = self.init_seed
class MatchingNetworkLoader(object):
def __init__(self, name, num_of_gpus, batch_size, image_height, image_width, image_channels, num_classes_per_set, data_path,
num_samples_per_class, train_val_test_split,
samples_per_iter=1, num_workers=4, reverse_channels=False, seed=100, labels_as_int=False):
self.zip_dir = "datasets/{}.zip".format(name)
self.data_folder_dir = "datasets/{}".format(name)
self.datasets_dir = "datasets/"
self.num_of_gpus = num_of_gpus
self.batch_size = batch_size
self.samples_per_iter = samples_per_iter
self.num_workers = num_workers
self.total_train_iters_produced = 0
self.dataset = self.get_dataset(batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class, seed=seed,
reset_stored_filepaths=False, data_path=data_path, labels_as_int=labels_as_int)
self.batches_per_iter = samples_per_iter
self.full_data_length = self.dataset.data_length
def get_dataloader(self, shuffle=False):
return DataLoader(self.dataset, batch_size=(self.num_of_gpus * self.batch_size * self.samples_per_iter),
shuffle=shuffle, num_workers=self.num_workers, drop_last=True)
def get_dataset(self, batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class, seed,
reset_stored_filepaths, data_path, labels_as_int):
return NotImplementedError
def get_train_batches(self, total_batches=-1, augment_images=False):
if total_batches==-1:
self.dataset.data_length = self.full_data_length
else:
self.dataset.data_length["train"] = total_batches * self.dataset.batch_size
self.dataset.switch_set(dataset_name="train",
seed=self.dataset.init_seed["train"] + self.total_train_iters_produced)
self.dataset.set_augmentation(augment_images=augment_images)
self.total_train_iters_produced += self.dataset.data_length["train"]
for sample_id, sample_batched in enumerate(self.get_dataloader(shuffle=True)):
preprocess_sample = self.sample_iter_data(sample=sample_batched, num_gpus=self.dataset.num_of_gpus,
samples_per_iter=self.batches_per_iter,
batch_size=self.dataset.batch_size)
yield preprocess_sample
def get_val_batches(self, total_batches=-1, augment_images=False):
if total_batches==-1:
self.dataset.data_length = self.full_data_length
else:
self.dataset.data_length['val'] = total_batches * self.dataset.batch_size
self.dataset.switch_set(dataset_name="val")
self.dataset.set_augmentation(augment_images=augment_images)
for sample_id, sample_batched in enumerate(self.get_dataloader(shuffle=False)):
preprocess_sample = self.sample_iter_data(sample=sample_batched, num_gpus=self.dataset.num_of_gpus,
samples_per_iter=self.batches_per_iter,
batch_size=self.dataset.batch_size)
yield preprocess_sample
def get_test_batches(self, total_batches=-1, augment_images=False):
if total_batches==-1:
self.dataset.data_length = self.full_data_length
else:
self.dataset.data_length['test'] = total_batches * self.dataset.batch_size
self.dataset.switch_set(dataset_name="test")
self.dataset.set_augmentation(augment_images=augment_images)
for sample_id, sample_batched in enumerate(self.get_dataloader(shuffle=False)):
preprocess_sample = self.sample_iter_data(sample=sample_batched, num_gpus=self.dataset.num_of_gpus,
samples_per_iter=self.batches_per_iter,
batch_size=self.dataset.batch_size)
yield preprocess_sample
def sample_iter_data(self, sample, num_gpus, batch_size, samples_per_iter):
output_sample = []
for key in sample.keys():
sample[key] = np.array(sample[key].numpy(), dtype=np.float32)
new_shape = []
curr_id = 1
for i in range(len(sample[key].shape) + 2):
if i == 0:
new_shape.append(samples_per_iter)
elif i == 1:
new_shape.append(num_gpus)
elif i == 2:
new_shape.append(batch_size)
else:
new_shape.append(sample[key].shape[curr_id])
curr_id += 1
output_sample.append(np.reshape(sample[key], newshape=new_shape))
return output_sample
class FolderMatchingNetworkDatasetParallel(MatchingNetworkDatasetParallel):
def __init__(self, name, num_of_gpus, batch_size, image_height, image_width, image_channels,
train_val_test_split, data_path, indexes_of_folders_indicating_class, reset_stored_filepaths,
num_samples_per_class, num_classes_per_set, labels_as_int, reverse_channels):
super(FolderMatchingNetworkDatasetParallel, self).__init__(
batch_size=batch_size, reverse_channels=reverse_channels,
num_of_gpus=num_of_gpus, image_height=image_height,
image_width=image_width, image_channels=image_channels,
train_val_test_split=train_val_test_split, reset_stored_filepaths=reset_stored_filepaths,
num_classes_per_set=num_classes_per_set, num_samples_per_class=num_samples_per_class,
labels_as_int=labels_as_int, data_path=os.path.abspath(data_path), dataset_name=name,
indexes_of_folders_indicating_class=indexes_of_folders_indicating_class)
class FolderDatasetLoader(MatchingNetworkLoader):
def __init__(self, name, batch_size, image_height, image_width, image_channels, data_path, train_val_test_split,
num_of_gpus=1, samples_per_iter=1, num_workers=4, indexes_of_folders_indicating_class=[-2],
reset_stored_filepaths=False, num_samples_per_class=1, num_classes_per_set=20, reverse_channels=False,
seed=100, label_as_int=False):
self.name = name
self.indexes_of_folders_indicating_class = indexes_of_folders_indicating_class
self.reset_stored_filepaths = reset_stored_filepaths
super(FolderDatasetLoader, self).__init__(name, num_of_gpus, batch_size, image_height, image_width, image_channels, num_classes_per_set, data_path,
num_samples_per_class, train_val_test_split,
samples_per_iter, num_workers, reverse_channels, seed, labels_as_int=label_as_int)
def get_dataset(self, batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class, seed,
reset_stored_filepaths, data_path, labels_as_int):
return FolderMatchingNetworkDatasetParallel(name=self.name, num_of_gpus=num_of_gpus, batch_size=batch_size,
image_height=image_height, image_width=image_width,
image_channels=image_channels,
train_val_test_split=train_val_test_split, data_path=data_path,
indexes_of_folders_indicating_class=self.indexes_of_folders_indicating_class,
reset_stored_filepaths=self.reset_stored_filepaths,
num_samples_per_class=num_samples_per_class,
num_classes_per_set=num_classes_per_set, labels_as_int=labels_as_int,
reverse_channels=reverse_channels)
|
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import tqdm
import concurrent.futures
import pickle
from skimage import transform
def augment_image(image, k, channels):
if channels==1:
image = image[:, :, 0]
image = transform.rotate(image, angle=k, resize=False, center=None, order=1, mode='constant',
cval=0, clip=True, preserve_range=False)
if channels==1:
image = np.expand_dims(image, axis=2)
return image
class MatchingNetworkDatasetParallel(Dataset):
def __init__(self, batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class,
data_path, dataset_name, indexes_of_folders_indicating_class,
seed=100, reset_stored_filepaths=False, labels_as_int=False):
"""
:param batch_size: The batch size to use for the data loader
:param last_training_class_index: The final index for the training set, used to restrict the training set
if needed. E.g. if training set is 1200 classes and last_training_class_index=900 then only the first 900
classes will be used
:param reverse_channels: A boolean indicating whether we need to reverse the colour channels e.g. RGB to BGR
:param num_of_gpus: Number of gpus to use for training
:param gen_batches: How many batches to use from the validation set for the end of epoch generations
"""
self.data_path = data_path
self.dataset_name = dataset_name
self.indexes_of_folders_indicating_class = indexes_of_folders_indicating_class
self.labels_as_int = labels_as_int
self.train_val_test_split = train_val_test_split
self.current_dataset_name = "train"
self.reset_stored_filepaths = reset_stored_filepaths
self.x_train, self.x_val, self.x_test = self.load_dataset()
self.num_of_gpus = num_of_gpus
self.batch_size = batch_size
self.reverse_channels = reverse_channels
self.image_height, self.image_width, self.image_channel = image_height, image_width, image_channels
self.train_index = 0
self.val_index = 0
self.test_index = 0
self.init_seed = {"train": seed, "val": seed, "test": seed}
self.seed = {"train": seed, "val": seed, "test": seed}
self.augment_images = False
self.num_samples_per_class = num_samples_per_class
self.num_classes_per_set = num_classes_per_set
self.indexes = {"train": 0, "val": 0, "test": 0}
self.datasets = {"train": self.x_train,
"val": self.x_val,
"test": self.x_test}
self.dataset_size_dict = {"train": {key: len(self.x_train[key]) for key in list(self.x_train.keys())},
"val": {key: len(self.x_val[key]) for key in list(self.x_val.keys())},
"test": {key: len(self.x_test[key]) for key in list(self.x_test.keys())}}
self.label_set = self.get_label_set()
self.data_length = {name: np.sum([len(self.datasets[name][key])
for key in self.datasets[name]]) for name in self.datasets.keys()}
print("data", self.data_length)
#print(self.datasets)
def load_dataset(self):
data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()
total_label_types = len(data_image_paths)
print(total_label_types)
# data_image_paths = self.shuffle(data_image_paths)
x_train_id, x_val_id, x_test_id = int(self.train_val_test_split[0] * total_label_types), \
int(np.sum(self.train_val_test_split[:2]) * total_label_types), \
int(total_label_types)
print(x_train_id, x_val_id, x_test_id)
x_train_classes = (class_key for class_key in list(data_image_paths.keys())[:x_train_id])
x_val_classes = (class_key for class_key in list(data_image_paths.keys())[x_train_id:x_val_id])
x_test_classes = (class_key for class_key in list(data_image_paths.keys())[x_val_id:x_test_id])
x_train, x_val, x_test = {class_key: data_image_paths[class_key] for class_key in x_train_classes}, \
{class_key: data_image_paths[class_key] for class_key in x_val_classes}, \
{class_key: data_image_paths[class_key] for class_key in x_test_classes},
return x_train, x_val, x_test
def load_datapaths(self):
data_path_file = "datasets/{}.pkl".format(self.dataset_name)
self.index_to_label_name_dict_file = "datasets/map_to_label_name_{}.pkl".format(self.dataset_name)
self.label_name_to_map_dict_file = "datasets/label_name_to_map_{}.pkl".format(self.dataset_name)
if self.reset_stored_filepaths == True:
if os.path.exists(data_path_file):
os.remove(data_path_file)
self.reset_stored_filepaths=False
try:
data_image_paths = self.load_dict(data_path_file)
label_to_index = self.load_dict(name=self.label_name_to_map_dict_file)
index_to_label_name_dict_file = self.load_dict(name=self.index_to_label_name_dict_file)
return data_image_paths, index_to_label_name_dict_file, label_to_index
except:
print("Mapped data paths can't be found, remapping paths..")
data_image_paths, code_to_label_name, label_name_to_code = self.get_data_paths()
self.save_dict(data_image_paths, name=data_path_file)
self.save_dict(code_to_label_name, name=self.index_to_label_name_dict_file)
self.save_dict(label_name_to_code, name=self.label_name_to_map_dict_file)
return self.load_datapaths()
def save_dict(self, obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_dict(self, name):
with open(name, 'rb') as f:
return pickle.load(f)
def load_test_image(self, filepath):
try:
image = cv2.imread(filepath)
image = cv2.resize(image, dsize=(28, 28))
except RuntimeWarning:
os.system("convert {} -strip {}".format(filepath, filepath))
print("converting")
image = cv2.imread(filepath)
image = cv2.resize(image, dsize=(28, 28))
except:
print("Broken image")
os.remove(filepath)
if image is not None:
return filepath
else:
os.remove(filepath)
return None
def get_data_paths(self):
print("Get images from", self.data_path)
data_image_path_list_raw = []
labels = set()
for subdir, dir, files in os.walk(self.data_path):
for file in files:
if (".jpeg") in file.lower() or (".png") in file.lower() or (".jpg") in file.lower():
filepath = os.path.join(subdir, file)
label = self.get_label_from_path(filepath)
data_image_path_list_raw.append(filepath)
labels.add(label)
labels = sorted(labels)
idx_to_label_name = {idx: label for idx, label in enumerate(labels)}
label_name_to_idx = {label: idx for idx, label in enumerate(labels)}
data_image_path_dict = {idx: [] for idx in list(idx_to_label_name.keys())}
# with tqdm.tqdm(total=len(data_image_path_list_raw)) as pbar_error:
# with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
# # Process the list of files, but split the work across the process pool to use all CPUs!
# for image_file in executor.map(self.load_test_image, (data_image_path_list_raw)):
# pbar_error.update(1)
# if image_file is not None:
# label = self.get_label_from_path(image_file)
# data_image_path_dict[label_name_to_idx[label]].append(image_file)
bar = tqdm.tqdm(data_image_path_list_raw,total=len(data_image_path_list_raw))
for image_file in bar:
if image_file is not None:
label = self.get_label_from_path(image_file)
data_image_path_dict[label_name_to_idx[label]].append(image_file)
return data_image_path_dict, idx_to_label_name, label_name_to_idx
def get_label_set(self):
index_to_label_name_dict_file = self.load_dict(name=self.index_to_label_name_dict_file)
return set(list(index_to_label_name_dict_file.keys()))
def get_index_from_label(self, label):
label_to_index = self.load_dict(name=self.label_name_to_map_dict_file)
return label_to_index[label]
def get_label_from_index(self, index):
index_to_label_name = self.load_dict(name=self.index_to_label_name_dict_file)
return index_to_label_name[index]
def get_label_from_path(self, filepath):
label_bits = filepath.split("\\")
label = "_".join([label_bits[idx] for idx in self.indexes_of_folders_indicating_class])
if self.labels_as_int:
label = int(label)
return label
def load_image(self, image_path, channels):
image = cv2.imread(image_path)[:, :, :channels]
image = cv2.resize(image, dsize=(self.image_height, self.image_width))
if channels==1:
image = np.expand_dims(image, axis=2)
return image
def load_batch(self, batch_image_paths):
image_batch = []
image_paths = []
for image_path in batch_image_paths:
image_paths.append(image_path)
for image_path in image_paths:
image = self.load_image(image_path=image_path, channels=self.image_channel)
image_batch.append(image)
image_batch = np.array(image_batch, dtype=np.float32)
image_batch = self.preprocess_data(image_batch)
return image_batch
def preprocess_data(self, x):
"""
Preprocesses data such that their values lie in the -1.0 to 1.0 range so that the tanh activation gen output
can work properly
:param x: A data batch to preprocess
:return: A preprocessed data batch
"""
x = x / 255.0
x = 2 * x - 1
x_shape = x.shape
x = np.reshape(x, (-1, x_shape[-3], x_shape[-2], x_shape[-1]))
if self.reverse_channels is True:
reverse_photos = np.ones(shape=x.shape)
for channel in range(x.shape[-1]):
reverse_photos[:, :, :, x.shape[-1] - 1 - channel] = x[:, :, :, channel]
x = reverse_photos
x = x.reshape(x_shape)
# print(x.mean(), x.min(), x.max())
return x
def reconstruct_original(self, x):
"""
Applies the reverse operations that preprocess_data() applies such that the data returns to their original form
:param x: A batch of data to reconstruct
:return: A reconstructed batch of data
"""
x = (x + 1) / 2
x = x * 255.0
return x
def shuffle(self, x):
"""
Shuffles the data batch along it's first axis
:param x: A data batch
:return: A shuffled data batch
"""
indices = np.arange(len(x))
np.random.shuffle(indices)
x = x[indices]
return x
def get_set(self, dataset_name, seed, augment_images=False):
"""
Generates a data batch to be used for training or evaluation
:param set_name: The name of the set to use, e.g. "train", "val" etc
:return: A data batch
"""
rng = np.random.RandomState(seed)
selected_classes = rng.choice(list(self.dataset_size_dict[dataset_name].keys()),
size=self.num_classes_per_set, replace=False)
target_class = rng.choice(selected_classes, size=1, replace=False)[0]
k_list = rng.randint(0, 3, size=self.num_classes_per_set)
k_dict = {selected_class: k_item for (selected_class, k_item) in zip(selected_classes, k_list)}
episode_labels = [i for i in range(self.num_classes_per_set)]
class_to_episode_label = {selected_class: episode_label for (selected_class, episode_label) in
zip(selected_classes, episode_labels)}
support_set_images = []
support_set_labels = []
for class_entry in selected_classes:
choose_samples_list = rng.choice(self.dataset_size_dict[dataset_name][class_entry],
size=self.num_samples_per_class, replace=True)
class_image_samples = []
class_labels = []
for sample in choose_samples_list:
choose_samples = self.datasets[dataset_name][class_entry][sample]
x_class_data = self.load_batch([choose_samples])[0]
if augment_images is True:
k = k_dict[class_entry]
x_class_data = augment_image(image=x_class_data, k=k*90, channels=self.image_channel)
class_image_samples.append(x_class_data)
class_labels.append(int(class_to_episode_label[class_entry]))
support_set_images.append(class_image_samples)
support_set_labels.append(class_labels)
support_set_images = np.array(support_set_images, dtype=np.float32)
support_set_labels = np.array(support_set_labels, dtype=np.int32)
target_sample = rng.choice(self.dataset_size_dict[dataset_name][target_class], size=1,
replace=True)[0]
choose_samples = self.datasets[dataset_name][target_class][target_sample]
target_set_image = self.load_batch([choose_samples])[0]
if augment_images is True:
k = k_dict[target_class]
target_set_image = augment_image(image=target_set_image, k=k * 90, channels=self.image_channel)
target_set_label = int(class_to_episode_label[target_class])
return support_set_images, target_set_image, support_set_labels, target_set_label
def __len__(self):
total_samples = self.data_length[self.current_dataset_name]
return total_samples
def length(self, dataset_name):
self.switch_set(dataset_name=dataset_name)
return len(self)
def set_augmentation(self, augment_images):
self.augment_images = augment_images
def switch_set(self, dataset_name, seed=100):
self.current_dataset_name = dataset_name
if dataset_name=="train":
self.update_seed(dataset_name=dataset_name, seed=seed)
def update_seed(self, dataset_name, seed=100):
self.init_seed[dataset_name] = seed
def __getitem__(self, idx):
support_set_images, target_set_image, support_set_labels, target_set_label = \
self.get_set(self.current_dataset_name, seed=self.init_seed[self.current_dataset_name] + idx, augment_images=self.augment_images)
data_point = {"support_set_images": support_set_images, "target_set_image": target_set_image,
"support_set_labels": support_set_labels, "target_set_label": target_set_label}
self.seed[self.current_dataset_name] = self.seed[self.current_dataset_name] + 1
return data_point
def reset_seed(self):
self.seed = self.init_seed
class MatchingNetworkLoader(object):
def __init__(self, name, num_of_gpus, batch_size, image_height, image_width, image_channels, num_classes_per_set, data_path,
num_samples_per_class, train_val_test_split,
samples_per_iter=1, num_workers=4, reverse_channels=False, seed=100, labels_as_int=False):
self.zip_dir = "datasets/{}.zip".format(name)
self.data_folder_dir = "datasets/{}".format(name)
self.datasets_dir = "datasets/"
self.num_of_gpus = num_of_gpus
self.batch_size = batch_size
self.samples_per_iter = samples_per_iter
self.num_workers = num_workers
self.total_train_iters_produced = 0
self.dataset = self.get_dataset(batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class, seed=seed,
reset_stored_filepaths=False, data_path=data_path, labels_as_int=labels_as_int)
self.batches_per_iter = samples_per_iter
self.full_data_length = self.dataset.data_length
def get_dataloader(self, shuffle=False):
return DataLoader(self.dataset, batch_size=(self.num_of_gpus * self.batch_size * self.samples_per_iter),
shuffle=shuffle, num_workers=self.num_workers, drop_last=True)
def get_dataset(self, batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class, seed,
reset_stored_filepaths, data_path, labels_as_int):
return NotImplementedError
def get_train_batches(self, total_batches=-1, augment_images=False):
if total_batches==-1:
self.dataset.data_length = self.full_data_length
else:
self.dataset.data_length["train"] = total_batches * self.dataset.batch_size
self.dataset.switch_set(dataset_name="train",
seed=self.dataset.init_seed["train"] + self.total_train_iters_produced)
self.dataset.set_augmentation(augment_images=augment_images)
self.total_train_iters_produced += self.dataset.data_length["train"]
for sample_id, sample_batched in enumerate(self.get_dataloader(shuffle=True)):
preprocess_sample = self.sample_iter_data(sample=sample_batched, num_gpus=self.dataset.num_of_gpus,
samples_per_iter=self.batches_per_iter,
batch_size=self.dataset.batch_size)
yield preprocess_sample
def get_val_batches(self, total_batches=-1, augment_images=False):
if total_batches==-1:
self.dataset.data_length = self.full_data_length
else:
self.dataset.data_length['val'] = total_batches * self.dataset.batch_size
self.dataset.switch_set(dataset_name="val")
self.dataset.set_augmentation(augment_images=augment_images)
for sample_id, sample_batched in enumerate(self.get_dataloader(shuffle=False)):
preprocess_sample = self.sample_iter_data(sample=sample_batched, num_gpus=self.dataset.num_of_gpus,
samples_per_iter=self.batches_per_iter,
batch_size=self.dataset.batch_size)
yield preprocess_sample
def get_test_batches(self, total_batches=-1, augment_images=False):
if total_batches==-1:
self.dataset.data_length = self.full_data_length
else:
self.dataset.data_length['test'] = total_batches * self.dataset.batch_size
self.dataset.switch_set(dataset_name="test")
self.dataset.set_augmentation(augment_images=augment_images)
for sample_id, sample_batched in enumerate(self.get_dataloader(shuffle=False)):
preprocess_sample = self.sample_iter_data(sample=sample_batched, num_gpus=self.dataset.num_of_gpus,
samples_per_iter=self.batches_per_iter,
batch_size=self.dataset.batch_size)
yield preprocess_sample
def sample_iter_data(self, sample, num_gpus, batch_size, samples_per_iter):
output_sample = []
for key in sample.keys():
sample[key] = np.array(sample[key].numpy(), dtype=np.float32)
new_shape = []
curr_id = 1
for i in range(len(sample[key].shape) + 2):
if i == 0:
new_shape.append(samples_per_iter)
elif i == 1:
new_shape.append(num_gpus)
elif i == 2:
new_shape.append(batch_size)
else:
new_shape.append(sample[key].shape[curr_id])
curr_id += 1
output_sample.append(np.reshape(sample[key], newshape=new_shape))
return output_sample
class FolderMatchingNetworkDatasetParallel(MatchingNetworkDatasetParallel):
def __init__(self, name, num_of_gpus, batch_size, image_height, image_width, image_channels,
train_val_test_split, data_path, indexes_of_folders_indicating_class, reset_stored_filepaths,
num_samples_per_class, num_classes_per_set, labels_as_int, reverse_channels):
super(FolderMatchingNetworkDatasetParallel, self).__init__(
batch_size=batch_size, reverse_channels=reverse_channels,
num_of_gpus=num_of_gpus, image_height=image_height,
image_width=image_width, image_channels=image_channels,
train_val_test_split=train_val_test_split, reset_stored_filepaths=reset_stored_filepaths,
num_classes_per_set=num_classes_per_set, num_samples_per_class=num_samples_per_class,
labels_as_int=labels_as_int, data_path=os.path.abspath(data_path), dataset_name=name,
indexes_of_folders_indicating_class=indexes_of_folders_indicating_class)
class FolderDatasetLoader(MatchingNetworkLoader):
def __init__(self, name, batch_size, image_height, image_width, image_channels, data_path, train_val_test_split,
num_of_gpus=1, samples_per_iter=1, num_workers=4, indexes_of_folders_indicating_class=[-2],
reset_stored_filepaths=False, num_samples_per_class=1, num_classes_per_set=20, reverse_channels=False,
seed=100, label_as_int=False):
self.name = name
self.indexes_of_folders_indicating_class = indexes_of_folders_indicating_class
self.reset_stored_filepaths = reset_stored_filepaths
super(FolderDatasetLoader, self).__init__(name, num_of_gpus, batch_size, image_height, image_width, image_channels, num_classes_per_set, data_path,
num_samples_per_class, train_val_test_split,
samples_per_iter, num_workers, reverse_channels, seed, labels_as_int=label_as_int)
def get_dataset(self, batch_size, reverse_channels, num_of_gpus, image_height, image_width, image_channels,
train_val_test_split, num_classes_per_set, num_samples_per_class, seed,
reset_stored_filepaths, data_path, labels_as_int):
return FolderMatchingNetworkDatasetParallel(name=self.name, num_of_gpus=num_of_gpus, batch_size=batch_size,
image_height=image_height, image_width=image_width,
image_channels=image_channels,
train_val_test_split=train_val_test_split, data_path=data_path,
indexes_of_folders_indicating_class=self.indexes_of_folders_indicating_class,
reset_stored_filepaths=self.reset_stored_filepaths,
num_samples_per_class=num_samples_per_class,
num_classes_per_set=num_classes_per_set, labels_as_int=labels_as_int,
reverse_channels=reverse_channels)
|
en
| 0.780753
|
:param batch_size: The batch size to use for the data loader :param last_training_class_index: The final index for the training set, used to restrict the training set if needed. E.g. if training set is 1200 classes and last_training_class_index=900 then only the first 900 classes will be used :param reverse_channels: A boolean indicating whether we need to reverse the colour channels e.g. RGB to BGR :param num_of_gpus: Number of gpus to use for training :param gen_batches: How many batches to use from the validation set for the end of epoch generations #print(self.datasets) # data_image_paths = self.shuffle(data_image_paths) # with tqdm.tqdm(total=len(data_image_path_list_raw)) as pbar_error: # with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor: # # Process the list of files, but split the work across the process pool to use all CPUs! # for image_file in executor.map(self.load_test_image, (data_image_path_list_raw)): # pbar_error.update(1) # if image_file is not None: # label = self.get_label_from_path(image_file) # data_image_path_dict[label_name_to_idx[label]].append(image_file) Preprocesses data such that their values lie in the -1.0 to 1.0 range so that the tanh activation gen output can work properly :param x: A data batch to preprocess :return: A preprocessed data batch # print(x.mean(), x.min(), x.max()) Applies the reverse operations that preprocess_data() applies such that the data returns to their original form :param x: A batch of data to reconstruct :return: A reconstructed batch of data Shuffles the data batch along it's first axis :param x: A data batch :return: A shuffled data batch Generates a data batch to be used for training or evaluation :param set_name: The name of the set to use, e.g. "train", "val" etc :return: A data batch
| 2.844445
| 3
|
webstore/webstore/databases.py
|
AlbertoAdolfo27/webstore-python
| 0
|
6625895
|
from pathlib import Path
class Databases:
BASE_DIR = Path(__file__).resolve().parent.parent
SQLITE3 = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
MYSQL = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'webstore',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
},
}
|
from pathlib import Path
class Databases:
BASE_DIR = Path(__file__).resolve().parent.parent
SQLITE3 = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
MYSQL = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'webstore',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
},
}
|
none
| 1
| 2.007681
| 2
|
|
windows/win32/paramiko-1.14.0/paramiko/kex_group1.py
|
mytliulei/DCNRobotInstallPackages
| 2
|
6625896
|
<gh_stars>1-10
# Copyright (C) 2003-2007 <NAME> <<EMAIL>>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
1024 bit key halves, using a known "p" prime and "g" generator.
"""
import os
from hashlib import sha1
from paramiko import util
from paramiko.common import max_byte, zero_byte
from paramiko.message import Message
from paramiko.py3compat import byte_chr, long, byte_mask
from paramiko.ssh_exception import SSHException
_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
c_MSG_KEXDH_INIT, c_MSG_KEXDH_REPLY = [byte_chr(c) for c in range(30, 32)]
# draft-ietf-secsh-transport-09.txt, page 17
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
G = 2
b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7
b0000000000000000 = zero_byte * 8
class KexGroup1(object):
name = 'diffie-hellman-group1-sha1'
def __init__(self, transport):
self.transport = transport
self.x = long(0)
self.e = long(0)
self.f = long(0)
def start_kex(self):
self._generate_x()
if self.transport.server_mode:
# compute f = g^x mod p, but don't send it yet
self.f = pow(G, self.x, P)
self.transport._expect_packet(_MSG_KEXDH_INIT)
return
# compute e = g^x mod p (where g=2), and send it
self.e = pow(G, self.x, P)
m = Message()
m.add_byte(c_MSG_KEXDH_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_REPLY)
def parse_next(self, ptype, m):
if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
return self._parse_kexdh_init(m)
elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
return self._parse_kexdh_reply(m)
raise SSHException('KexGroup1 asked to handle packet type %d' % ptype)
### internals...
def _generate_x(self):
# generate an "x" (1 < x < q), where q is (p-1)/2.
# p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
# therefore q can be approximated as a 2^1023. we drop the subset of
# potential x where the first 63 bits are 1, because some of those will be
# larger than q (but this is a tiny tiny subset of potential x).
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:]
if (x_bytes[:8] != b7fffffffffffffff and
x_bytes[:8] != b0000000000000000):
break
self.x = util.inflate_long(x_bytes)
def _parse_kexdh_reply(self, m):
# client mode
host_key = m.get_string()
self.f = m.get_mpint()
if (self.f < 1) or (self.f > P - 1):
raise SSHException('Server kex "f" is out of range')
sig = m.get_binary()
K = pow(self.f, self.x, P)
# okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init)
hm.add_string(host_key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, sha1(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
def _parse_kexdh_init(self, m):
# server mode
self.e = m.get_mpint()
if (self.e < 1) or (self.e > P - 1):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, P)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init)
hm.add_string(key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(H)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
|
# Copyright (C) 2003-2007 <NAME> <<EMAIL>>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
1024 bit key halves, using a known "p" prime and "g" generator.
"""
import os
from hashlib import sha1
from paramiko import util
from paramiko.common import max_byte, zero_byte
from paramiko.message import Message
from paramiko.py3compat import byte_chr, long, byte_mask
from paramiko.ssh_exception import SSHException
_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
c_MSG_KEXDH_INIT, c_MSG_KEXDH_REPLY = [byte_chr(c) for c in range(30, 32)]
# draft-ietf-secsh-transport-09.txt, page 17
P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
G = 2
b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7
b0000000000000000 = zero_byte * 8
class KexGroup1(object):
name = 'diffie-hellman-group1-sha1'
def __init__(self, transport):
self.transport = transport
self.x = long(0)
self.e = long(0)
self.f = long(0)
def start_kex(self):
self._generate_x()
if self.transport.server_mode:
# compute f = g^x mod p, but don't send it yet
self.f = pow(G, self.x, P)
self.transport._expect_packet(_MSG_KEXDH_INIT)
return
# compute e = g^x mod p (where g=2), and send it
self.e = pow(G, self.x, P)
m = Message()
m.add_byte(c_MSG_KEXDH_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_REPLY)
def parse_next(self, ptype, m):
if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
return self._parse_kexdh_init(m)
elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
return self._parse_kexdh_reply(m)
raise SSHException('KexGroup1 asked to handle packet type %d' % ptype)
### internals...
def _generate_x(self):
# generate an "x" (1 < x < q), where q is (p-1)/2.
# p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
# therefore q can be approximated as a 2^1023. we drop the subset of
# potential x where the first 63 bits are 1, because some of those will be
# larger than q (but this is a tiny tiny subset of potential x).
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:]
if (x_bytes[:8] != b7fffffffffffffff and
x_bytes[:8] != b0000000000000000):
break
self.x = util.inflate_long(x_bytes)
def _parse_kexdh_reply(self, m):
# client mode
host_key = m.get_string()
self.f = m.get_mpint()
if (self.f < 1) or (self.f > P - 1):
raise SSHException('Server kex "f" is out of range')
sig = m.get_binary()
K = pow(self.f, self.x, P)
# okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init)
hm.add_string(host_key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, sha1(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
def _parse_kexdh_init(self, m):
# server mode
self.e = m.get_mpint()
if (self.e < 1) or (self.e > P - 1):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, P)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init)
hm.add_string(key)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = sha1(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(H)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
|
en
| 0.815923
|
# Copyright (C) 2003-2007 <NAME> <<EMAIL>> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of 1024 bit key halves, using a known "p" prime and "g" generator. # draft-ietf-secsh-transport-09.txt, page 17 # compute f = g^x mod p, but don't send it yet # compute e = g^x mod p (where g=2), and send it ### internals... # generate an "x" (1 < x < q), where q is (p-1)/2. # p is a 128-byte (1024-bit) number, where the first 64 bits are 1. # therefore q can be approximated as a 2^1023. we drop the subset of # potential x where the first 63 bits are 1, because some of those will be # larger than q (but this is a tiny tiny subset of potential x). # client mode # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K) # server mode # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K) # sign it # send reply
| 2.256977
| 2
|
Chapter09/production/tensorflow_serving/apis/input_pb2.py
|
PacktPublishing/Machine-Learning-with-TensorFlow-1.x
| 12
|
6625897
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/input.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.example import example_pb2 as tensorflow_dot_core_dot_example_dot_example__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/apis/input.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_pb=_b('\n#tensorflow_serving/apis/input.proto\x12\x12tensorflow.serving\x1a%tensorflow/core/example/example.proto\"4\n\x0b\x45xampleList\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\"e\n\x16\x45xampleListWithContext\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.tensorflow.Example\"\x99\x01\n\x05Input\x12\x37\n\x0c\x65xample_list\x18\x01 \x01(\x0b\x32\x1f.tensorflow.serving.ExampleListH\x00\x12O\n\x19\x65xample_list_with_context\x18\x02 \x01(\x0b\x32*.tensorflow.serving.ExampleListWithContextH\x00\x42\x06\n\x04kindB\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_example_dot_example__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXAMPLELIST = _descriptor.Descriptor(
name='ExampleList',
full_name='tensorflow.serving.ExampleList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='examples', full_name='tensorflow.serving.ExampleList.examples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=150,
)
_EXAMPLELISTWITHCONTEXT = _descriptor.Descriptor(
name='ExampleListWithContext',
full_name='tensorflow.serving.ExampleListWithContext',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='examples', full_name='tensorflow.serving.ExampleListWithContext.examples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='context', full_name='tensorflow.serving.ExampleListWithContext.context', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=253,
)
_INPUT = _descriptor.Descriptor(
name='Input',
full_name='tensorflow.serving.Input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='example_list', full_name='tensorflow.serving.Input.example_list', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='example_list_with_context', full_name='tensorflow.serving.Input.example_list_with_context', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='kind', full_name='tensorflow.serving.Input.kind',
index=0, containing_type=None, fields=[]),
],
serialized_start=256,
serialized_end=409,
)
_EXAMPLELIST.fields_by_name['examples'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_EXAMPLELISTWITHCONTEXT.fields_by_name['examples'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_EXAMPLELISTWITHCONTEXT.fields_by_name['context'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_INPUT.fields_by_name['example_list'].message_type = _EXAMPLELIST
_INPUT.fields_by_name['example_list_with_context'].message_type = _EXAMPLELISTWITHCONTEXT
_INPUT.oneofs_by_name['kind'].fields.append(
_INPUT.fields_by_name['example_list'])
_INPUT.fields_by_name['example_list'].containing_oneof = _INPUT.oneofs_by_name['kind']
_INPUT.oneofs_by_name['kind'].fields.append(
_INPUT.fields_by_name['example_list_with_context'])
_INPUT.fields_by_name['example_list_with_context'].containing_oneof = _INPUT.oneofs_by_name['kind']
DESCRIPTOR.message_types_by_name['ExampleList'] = _EXAMPLELIST
DESCRIPTOR.message_types_by_name['ExampleListWithContext'] = _EXAMPLELISTWITHCONTEXT
DESCRIPTOR.message_types_by_name['Input'] = _INPUT
ExampleList = _reflection.GeneratedProtocolMessageType('ExampleList', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLELIST,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleList)
))
_sym_db.RegisterMessage(ExampleList)
ExampleListWithContext = _reflection.GeneratedProtocolMessageType('ExampleListWithContext', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLELISTWITHCONTEXT,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleListWithContext)
))
_sym_db.RegisterMessage(ExampleListWithContext)
Input = _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), dict(
DESCRIPTOR = _INPUT,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Input)
))
_sym_db.RegisterMessage(Input)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/input.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.example import example_pb2 as tensorflow_dot_core_dot_example_dot_example__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/apis/input.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_pb=_b('\n#tensorflow_serving/apis/input.proto\x12\x12tensorflow.serving\x1a%tensorflow/core/example/example.proto\"4\n\x0b\x45xampleList\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\"e\n\x16\x45xampleListWithContext\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.tensorflow.Example\"\x99\x01\n\x05Input\x12\x37\n\x0c\x65xample_list\x18\x01 \x01(\x0b\x32\x1f.tensorflow.serving.ExampleListH\x00\x12O\n\x19\x65xample_list_with_context\x18\x02 \x01(\x0b\x32*.tensorflow.serving.ExampleListWithContextH\x00\x42\x06\n\x04kindB\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_example_dot_example__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXAMPLELIST = _descriptor.Descriptor(
name='ExampleList',
full_name='tensorflow.serving.ExampleList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='examples', full_name='tensorflow.serving.ExampleList.examples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=150,
)
_EXAMPLELISTWITHCONTEXT = _descriptor.Descriptor(
name='ExampleListWithContext',
full_name='tensorflow.serving.ExampleListWithContext',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='examples', full_name='tensorflow.serving.ExampleListWithContext.examples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='context', full_name='tensorflow.serving.ExampleListWithContext.context', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=253,
)
_INPUT = _descriptor.Descriptor(
name='Input',
full_name='tensorflow.serving.Input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='example_list', full_name='tensorflow.serving.Input.example_list', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='example_list_with_context', full_name='tensorflow.serving.Input.example_list_with_context', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='kind', full_name='tensorflow.serving.Input.kind',
index=0, containing_type=None, fields=[]),
],
serialized_start=256,
serialized_end=409,
)
_EXAMPLELIST.fields_by_name['examples'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_EXAMPLELISTWITHCONTEXT.fields_by_name['examples'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_EXAMPLELISTWITHCONTEXT.fields_by_name['context'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_INPUT.fields_by_name['example_list'].message_type = _EXAMPLELIST
_INPUT.fields_by_name['example_list_with_context'].message_type = _EXAMPLELISTWITHCONTEXT
_INPUT.oneofs_by_name['kind'].fields.append(
_INPUT.fields_by_name['example_list'])
_INPUT.fields_by_name['example_list'].containing_oneof = _INPUT.oneofs_by_name['kind']
_INPUT.oneofs_by_name['kind'].fields.append(
_INPUT.fields_by_name['example_list_with_context'])
_INPUT.fields_by_name['example_list_with_context'].containing_oneof = _INPUT.oneofs_by_name['kind']
DESCRIPTOR.message_types_by_name['ExampleList'] = _EXAMPLELIST
DESCRIPTOR.message_types_by_name['ExampleListWithContext'] = _EXAMPLELISTWITHCONTEXT
DESCRIPTOR.message_types_by_name['Input'] = _INPUT
ExampleList = _reflection.GeneratedProtocolMessageType('ExampleList', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLELIST,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleList)
))
_sym_db.RegisterMessage(ExampleList)
ExampleListWithContext = _reflection.GeneratedProtocolMessageType('ExampleListWithContext', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLELISTWITHCONTEXT,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleListWithContext)
))
_sym_db.RegisterMessage(ExampleListWithContext)
Input = _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), dict(
DESCRIPTOR = _INPUT,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Input)
))
_sym_db.RegisterMessage(Input)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
en
| 0.204555
|
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: tensorflow_serving/apis/input.proto # @@protoc_insertion_point(imports) #tensorflow_serving/apis/input.proto\x12\x12tensorflow.serving\x1a%tensorflow/core/example/example.proto\"4\n\x0b\x45xampleList\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\"e\n\x16\x45xampleListWithContext\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.tensorflow.Example\"\x99\x01\n\x05Input\x12\x37\n\x0c\x65xample_list\x18\x01 \x01(\x0b\x32\x1f.tensorflow.serving.ExampleListH\x00\x12O\n\x19\x65xample_list_with_context\x18\x02 \x01(\x0b\x32*.tensorflow.serving.ExampleListWithContextH\x00\x42\x06\n\x04kindB\x03\xf8\x01\x01\x62\x06proto3') # @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleList) # @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleListWithContext) # @@protoc_insertion_point(class_scope:tensorflow.serving.Input) # @@protoc_insertion_point(module_scope)
| 1.127437
| 1
|
02_neural_networks/2-7_introduction_to_pytorch/2-7_simple_mlp/fmnist_mlp_optimizer_eval.py
|
vseib/deep-learning-nano-degree
| 1
|
6625898
|
<gh_stars>1-10
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms
import torch.nn.functional as F
from datetime import datetime
time_format_dt = '%Y-%m-%d %H:%M:%S'
time_format_t = '%H:%M:%S'
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
])
# Download and load the training data
trainset = datasets.FashionMNIST('../F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
testset = datasets.FashionMNIST('../F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)
# check for cuda
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using", device)
# Define the loss
criterion = nn.CrossEntropyLoss()
optimizer_list = ["SGD", "Adam"]
learnrate_list = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]
for opt in optimizer_list:
for num, rate in enumerate(learnrate_list):
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# move to gpu if available
model.to(device)
# define optimizer
if opt == "SGD":
optimizer = optim.SGD(model.parameters(), rate)
else:
optimizer = optim.Adam(model.parameters(), rate/10.0)
for param_group in optimizer.param_groups:
str_lr = param_group['lr']
print("-----------------------------------------")
print("Optimizer:", opt, "learning rate:", str_lr)
epochs = 20
train_losses, test_losses, test_acc = [None], [None], [None]
for e in range(epochs):
running_loss_train = 0
model.train()
for images, labels in trainloader:
# move to gpu if available
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
images = images.view(images.shape[0], -1)
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss_train += loss.item()
running_loss_test = 0
total_accuracy = 0
model.eval()
for images, labels in testloader:
# move to gpu if available
images, labels = images.to(device), labels.to(device)
images = images.view(images.shape[0], -1)
output = model.forward(images)
# get probabilities and compute accuracy
probs = F.softmax(output, dim=1)
top_probs, top_classes = probs.topk(1, dim=1)
equals = (top_classes == labels.view(*top_classes.shape))
accuracy = torch.mean(equals.type(torch.FloatTensor))
loss = criterion(output, labels)
running_loss_test += loss.item()
total_accuracy += accuracy
# store losses and accuracy
train_losses.append(running_loss_train / len(trainloader))
test_losses.append(running_loss_test / len(testloader))
test_acc.append(total_accuracy / len(testloader))
time_stamp = "["+datetime.now().strftime(time_format_dt)+"]"
print(time_stamp+"\t",
"Epoch: {}/{}".format(e+1, epochs)+"\t",
"Train Loss: {:.4f}".format(running_loss_train / len(trainloader))+"\t",
"Test Loss: {:.4f}".format(running_loss_test / len(testloader))+"\t",
"Accuracy: {:.4f}".format(total_accuracy / len(testloader)))
# plot training statistics
import matplotlib.pyplot as plt
plt.plot(train_losses, label="Train Loss")
plt.plot(test_losses, label="Test Loss")
plt.plot(test_acc, label="Accuracy")
plt.legend(frameon=False)
plt.savefig("fmnist_plots_"+opt+"_"+str(str_lr)+".pdf")
plt.savefig("fmnist_plots_"+opt+"_"+str(str_lr)+".png")
plt.clf()
|
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms
import torch.nn.functional as F
from datetime import datetime
time_format_dt = '%Y-%m-%d %H:%M:%S'
time_format_t = '%H:%M:%S'
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
])
# Download and load the training data
trainset = datasets.FashionMNIST('../F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
testset = datasets.FashionMNIST('../F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)
# check for cuda
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using", device)
# Define the loss
criterion = nn.CrossEntropyLoss()
optimizer_list = ["SGD", "Adam"]
learnrate_list = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]
for opt in optimizer_list:
for num, rate in enumerate(learnrate_list):
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# move to gpu if available
model.to(device)
# define optimizer
if opt == "SGD":
optimizer = optim.SGD(model.parameters(), rate)
else:
optimizer = optim.Adam(model.parameters(), rate/10.0)
for param_group in optimizer.param_groups:
str_lr = param_group['lr']
print("-----------------------------------------")
print("Optimizer:", opt, "learning rate:", str_lr)
epochs = 20
train_losses, test_losses, test_acc = [None], [None], [None]
for e in range(epochs):
running_loss_train = 0
model.train()
for images, labels in trainloader:
# move to gpu if available
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
images = images.view(images.shape[0], -1)
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss_train += loss.item()
running_loss_test = 0
total_accuracy = 0
model.eval()
for images, labels in testloader:
# move to gpu if available
images, labels = images.to(device), labels.to(device)
images = images.view(images.shape[0], -1)
output = model.forward(images)
# get probabilities and compute accuracy
probs = F.softmax(output, dim=1)
top_probs, top_classes = probs.topk(1, dim=1)
equals = (top_classes == labels.view(*top_classes.shape))
accuracy = torch.mean(equals.type(torch.FloatTensor))
loss = criterion(output, labels)
running_loss_test += loss.item()
total_accuracy += accuracy
# store losses and accuracy
train_losses.append(running_loss_train / len(trainloader))
test_losses.append(running_loss_test / len(testloader))
test_acc.append(total_accuracy / len(testloader))
time_stamp = "["+datetime.now().strftime(time_format_dt)+"]"
print(time_stamp+"\t",
"Epoch: {}/{}".format(e+1, epochs)+"\t",
"Train Loss: {:.4f}".format(running_loss_train / len(trainloader))+"\t",
"Test Loss: {:.4f}".format(running_loss_test / len(testloader))+"\t",
"Accuracy: {:.4f}".format(total_accuracy / len(testloader)))
# plot training statistics
import matplotlib.pyplot as plt
plt.plot(train_losses, label="Train Loss")
plt.plot(test_losses, label="Test Loss")
plt.plot(test_acc, label="Accuracy")
plt.legend(frameon=False)
plt.savefig("fmnist_plots_"+opt+"_"+str(str_lr)+".pdf")
plt.savefig("fmnist_plots_"+opt+"_"+str(str_lr)+".png")
plt.clf()
|
en
| 0.759283
|
# Define a transform to normalize the data # Download and load the training data # check for cuda # Define the loss # Build a feed-forward network # move to gpu if available # define optimizer # move to gpu if available # move to gpu if available # get probabilities and compute accuracy # store losses and accuracy # plot training statistics
| 2.588522
| 3
|
caffe2onnx/src/OPs/Pooling.py
|
LaudateCorpus1/caffe2onnx
| 0
|
6625899
|
<gh_stars>0
import numpy as np
import caffe2onnx.src.c2oObject as Node
##-----------------------------------------------------Pooling--------------------------------------------------##
# Get hyperparameters
def getPoolingAttri(layer):
# Pooled core size
kernel_shape = np.array([layer.pooling_param.kernel_size]*2).flatten().tolist()
if layer.pooling_param.kernel_size == []:
kernel_shape = [layer.pooling_param.kernel_h,layer.pooling_param.kernel_w]
# Step
strides = [1, 1] # the default is 1
if layer.pooling_param.stride != []:
strides = np.array([layer.pooling_param.stride]*2).flatten().tolist()
# Filling
pads = [0, 0, 0, 0] # the default is 0
# Here is the same as when convolution, if there is a pad, it is set according to its value
if layer.pooling_param.pad != []:
pads = np.array([layer.pooling_param.pad] * 4).flatten().tolist()
elif layer.pooling_param.pad_h != 0 or layer.pooling_param.pad_w != 0:
pads = [layer.pooling_param.pad_h,layer.pooling_param.pad_w,layer.pooling_param.pad_h,layer.pooling_param.pad_w]
# Hyperparameter dictionary
dict = {"kernel_shape":kernel_shape,
"strides":strides,
"pads":pads
}
return dict
# Calculate the output dimension
def getPoolingOutShape(input_shape,layer,dict, with_indices=False):
kernel_shape = dict["kernel_shape"]
pads = dict["pads"]
strides = dict["strides"]
# Calculate the output dimension, as with convolution, round up if it is non-integer
h = (input_shape[0][2] - kernel_shape[0] + pads[0] + pads[2])/strides[0] + 1
if h > int(h):
output_shape_h = int(h) + 1
pads[2] += 1
else:
output_shape_h = int(h)
w = (input_shape[0][3] - kernel_shape[1] + pads[1] + pads[3])/strides[1] + 1
if w > int(w):
output_shape_w = int(w) + 1
pads[3] += 1
else:
output_shape_w = int(w)
if kernel_shape[0] == 0:
output_shape_h,output_shape_w = (1,1)
if not with_indices:
output_shape = [[input_shape[0][0],input_shape[0][1],output_shape_h,output_shape_w]]
else:
output_shape = [[input_shape[0][0],input_shape[0][1],output_shape_h,output_shape_w], [input_shape[0][0],input_shape[0][1],output_shape_h,output_shape_w]]
return output_shape
# Build node
def createPooling(layer,nodename,inname,outname,input_shape):
dict = getPoolingAttri(layer)
with_indices = True if len(outname) == 2 else False
output_shape = getPoolingOutShape(input_shape,layer, dict, with_indices=with_indices)
# Judgment is the type of pooling, maximum pooling, average pooling
if layer.pooling_param.pool == 0:
if layer.pooling_param.global_pooling == True:
node = Node.c2oNode(layer, nodename, "GlobalMaxPool", inname, outname, input_shape, output_shape, dict={})
else:
node = Node.c2oNode(layer, nodename, "MaxPool", inname, outname, input_shape, output_shape, dict=dict)
elif layer.pooling_param.pool == 1:
if layer.pooling_param.global_pooling == True:
node = Node.c2oNode(layer, nodename, "GlobalAveragePool", inname, outname, input_shape, output_shape, dict={})
else:
node = Node.c2oNode(layer, nodename, "AveragePool", inname, outname, input_shape, output_shape, dict=dict)
# Layers [i] .pooling_param.pool == 2 is random pooling
print(nodename, " node construction completed")
return node
|
import numpy as np
import caffe2onnx.src.c2oObject as Node
##-----------------------------------------------------Pooling--------------------------------------------------##
# Get hyperparameters
def getPoolingAttri(layer):
# Pooled core size
kernel_shape = np.array([layer.pooling_param.kernel_size]*2).flatten().tolist()
if layer.pooling_param.kernel_size == []:
kernel_shape = [layer.pooling_param.kernel_h,layer.pooling_param.kernel_w]
# Step
strides = [1, 1] # the default is 1
if layer.pooling_param.stride != []:
strides = np.array([layer.pooling_param.stride]*2).flatten().tolist()
# Filling
pads = [0, 0, 0, 0] # the default is 0
# Here is the same as when convolution, if there is a pad, it is set according to its value
if layer.pooling_param.pad != []:
pads = np.array([layer.pooling_param.pad] * 4).flatten().tolist()
elif layer.pooling_param.pad_h != 0 or layer.pooling_param.pad_w != 0:
pads = [layer.pooling_param.pad_h,layer.pooling_param.pad_w,layer.pooling_param.pad_h,layer.pooling_param.pad_w]
# Hyperparameter dictionary
dict = {"kernel_shape":kernel_shape,
"strides":strides,
"pads":pads
}
return dict
# Calculate the output dimension
def getPoolingOutShape(input_shape,layer,dict, with_indices=False):
kernel_shape = dict["kernel_shape"]
pads = dict["pads"]
strides = dict["strides"]
# Calculate the output dimension, as with convolution, round up if it is non-integer
h = (input_shape[0][2] - kernel_shape[0] + pads[0] + pads[2])/strides[0] + 1
if h > int(h):
output_shape_h = int(h) + 1
pads[2] += 1
else:
output_shape_h = int(h)
w = (input_shape[0][3] - kernel_shape[1] + pads[1] + pads[3])/strides[1] + 1
if w > int(w):
output_shape_w = int(w) + 1
pads[3] += 1
else:
output_shape_w = int(w)
if kernel_shape[0] == 0:
output_shape_h,output_shape_w = (1,1)
if not with_indices:
output_shape = [[input_shape[0][0],input_shape[0][1],output_shape_h,output_shape_w]]
else:
output_shape = [[input_shape[0][0],input_shape[0][1],output_shape_h,output_shape_w], [input_shape[0][0],input_shape[0][1],output_shape_h,output_shape_w]]
return output_shape
# Build node
def createPooling(layer,nodename,inname,outname,input_shape):
dict = getPoolingAttri(layer)
with_indices = True if len(outname) == 2 else False
output_shape = getPoolingOutShape(input_shape,layer, dict, with_indices=with_indices)
# Judgment is the type of pooling, maximum pooling, average pooling
if layer.pooling_param.pool == 0:
if layer.pooling_param.global_pooling == True:
node = Node.c2oNode(layer, nodename, "GlobalMaxPool", inname, outname, input_shape, output_shape, dict={})
else:
node = Node.c2oNode(layer, nodename, "MaxPool", inname, outname, input_shape, output_shape, dict=dict)
elif layer.pooling_param.pool == 1:
if layer.pooling_param.global_pooling == True:
node = Node.c2oNode(layer, nodename, "GlobalAveragePool", inname, outname, input_shape, output_shape, dict={})
else:
node = Node.c2oNode(layer, nodename, "AveragePool", inname, outname, input_shape, output_shape, dict=dict)
# Layers [i] .pooling_param.pool == 2 is random pooling
print(nodename, " node construction completed")
return node
|
en
| 0.711432
|
##-----------------------------------------------------Pooling--------------------------------------------------## # Get hyperparameters # Pooled core size # Step # the default is 1 # Filling # the default is 0 # Here is the same as when convolution, if there is a pad, it is set according to its value # Hyperparameter dictionary # Calculate the output dimension # Calculate the output dimension, as with convolution, round up if it is non-integer # Build node # Judgment is the type of pooling, maximum pooling, average pooling # Layers [i] .pooling_param.pool == 2 is random pooling
| 2.831077
| 3
|
tests/integration_tests/test_keras.py
|
seungjaeryanlee/optuna
| 0
|
6625900
|
from keras.layers import Dense
from keras import Sequential
import numpy as np
import pytest
import optuna
from optuna.integration import KerasPruningCallback
from optuna.testing.integration import create_running_trial
from optuna.testing.integration import DeterministicPruner
@pytest.mark.parametrize("interval, epochs", [(1, 1), (2, 1), (2, 2)])
def test_keras_pruning_callback(interval, epochs):
# type: (int, int) -> None
def objective(trial):
# type: (optuna.trial.Trial) -> float
model = Sequential()
model.add(Dense(1, activation="sigmoid", input_dim=20))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(
np.zeros((16, 20), np.float32),
np.zeros((16,), np.int32),
batch_size=1,
epochs=epochs,
callbacks=[KerasPruningCallback(trial, "accuracy", interval=interval)],
verbose=0,
)
return 1.0
study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
if interval <= epochs:
assert study.trials[0].state == optuna.trial.TrialState.PRUNED
else:
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
assert study.trials[0].value == 1.0
def test_keras_pruning_callback_observation_isnan():
# type: () -> None
study = optuna.create_study(pruner=DeterministicPruner(True))
trial = create_running_trial(study, 1.0)
callback = KerasPruningCallback(trial, "loss")
with pytest.raises(optuna.exceptions.TrialPruned):
callback.on_epoch_end(0, {"loss": 1.0})
with pytest.raises(optuna.exceptions.TrialPruned):
callback.on_epoch_end(0, {"loss": float("nan")})
|
from keras.layers import Dense
from keras import Sequential
import numpy as np
import pytest
import optuna
from optuna.integration import KerasPruningCallback
from optuna.testing.integration import create_running_trial
from optuna.testing.integration import DeterministicPruner
@pytest.mark.parametrize("interval, epochs", [(1, 1), (2, 1), (2, 2)])
def test_keras_pruning_callback(interval, epochs):
# type: (int, int) -> None
def objective(trial):
# type: (optuna.trial.Trial) -> float
model = Sequential()
model.add(Dense(1, activation="sigmoid", input_dim=20))
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(
np.zeros((16, 20), np.float32),
np.zeros((16,), np.int32),
batch_size=1,
epochs=epochs,
callbacks=[KerasPruningCallback(trial, "accuracy", interval=interval)],
verbose=0,
)
return 1.0
study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
if interval <= epochs:
assert study.trials[0].state == optuna.trial.TrialState.PRUNED
else:
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
assert study.trials[0].value == 1.0
def test_keras_pruning_callback_observation_isnan():
# type: () -> None
study = optuna.create_study(pruner=DeterministicPruner(True))
trial = create_running_trial(study, 1.0)
callback = KerasPruningCallback(trial, "loss")
with pytest.raises(optuna.exceptions.TrialPruned):
callback.on_epoch_end(0, {"loss": 1.0})
with pytest.raises(optuna.exceptions.TrialPruned):
callback.on_epoch_end(0, {"loss": float("nan")})
|
en
| 0.613426
|
# type: (int, int) -> None # type: (optuna.trial.Trial) -> float # type: () -> None
| 2.245705
| 2
|
graphsaint/tensorflow_version/model.py
|
alexs131/GraphSAINT
| 316
|
6625901
|
<filename>graphsaint/tensorflow_version/model.py<gh_stars>100-1000
import tensorflow as tf
from graphsaint.globals import *
from graphsaint.tensorflow_version.inits import *
import graphsaint.tensorflow_version.layers as layers
from graphsaint.utils import *
import pdb
class GraphSAINT:
def __init__(self, num_classes, placeholders, features,
arch_gcn, train_params, adj_full_norm, **kwargs):
'''
Args:
- placeholders: TensorFlow placeholder object.
- features: Numpy array with node features.
- adj: Numpy array with adjacency lists (padded with random re-samples)
- degrees: Numpy array with node degrees.
- sigmoid_loss: Set to true if nodes can belong to multiple classes
'''
if "attention" in arch_gcn:
self.aggregator_cls = layers.AttentionAggregator
self.mulhead = int(arch_gcn['attention'])
else:
self.aggregator_cls = layers.HighOrderAggregator
self.mulhead = 1
self.lr = train_params['lr']
self.node_subgraph = placeholders['node_subgraph']
self.num_layers = len(arch_gcn['arch'].split('-'))
self.weight_decay = train_params['weight_decay']
self.jk = None if 'jk' not in arch_gcn else arch_gcn['jk']
self.arch_gcn = arch_gcn
self.adj_subgraph = placeholders['adj_subgraph']
# adj_subgraph_* are to store row-wise partitioned full graph adj tiles.
self.adj_subgraph_0=placeholders['adj_subgraph_0']
self.adj_subgraph_1=placeholders['adj_subgraph_1']
self.adj_subgraph_2=placeholders['adj_subgraph_2']
self.adj_subgraph_3=placeholders['adj_subgraph_3']
self.adj_subgraph_4=placeholders['adj_subgraph_4']
self.adj_subgraph_5=placeholders['adj_subgraph_5']
self.adj_subgraph_6=placeholders['adj_subgraph_6']
self.adj_subgraph_7=placeholders['adj_subgraph_7']
self.dim0_adj_sub = placeholders['dim0_adj_sub'] #adj_full_norm.shape[0]/8
self.features = tf.Variable(tf.constant(features, dtype=DTYPE), trainable=False)
self.dualGPU=args_global.dualGPU
_indices = np.column_stack(adj_full_norm.nonzero())
_data = adj_full_norm.data
_shape = adj_full_norm.shape
with tf.device('/cpu:0'):
self.adj_full_norm = tf.SparseTensorValue(_indices,_data,_shape)
self.num_classes = num_classes
self.sigmoid_loss = (arch_gcn['loss']=='sigmoid')
_dims,self.order_layer,self.act_layer,self.bias_layer,self.aggr_layer = parse_layer_yml(arch_gcn,features.shape[1])
# get layer index for each conv layer, useful for jk net last layer aggregation
self.set_idx_conv()
self.set_dims(_dims)
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.loss = 0
self.opt_op = None
self.norm_loss = placeholders['norm_loss']
self.is_train = placeholders['is_train']
self.build()
def set_dims(self,dims):
self.dims_feat = [dims[0]] + [((self.aggr_layer[l]=='concat')*self.order_layer[l]+1)*dims[l+1] for l in range(len(dims)-1)]
self.dims_weight = [(self.dims_feat[l],dims[l+1]) for l in range(len(dims)-1)]
def set_idx_conv(self):
idx_conv = np.where(np.array(self.order_layer)>=1)[0]
idx_conv = list(idx_conv[1:] - 1)
idx_conv.append(len(self.order_layer)-1)
_o_arr = np.array(self.order_layer)[idx_conv]
if np.prod(np.ediff1d(_o_arr)) == 0:
self.idx_conv = idx_conv
else:
self.idx_conv = list(np.where(np.array(self.order_layer)==1)[0])
def build(self):
"""
Build the sample graph with adj info in self.sample()
directly feed the sampled support vectors to tf placeholder
"""
self.aggregators = self.get_aggregators()
_outputs_l = self.aggregate_subgraph()
if self.jk == 'concat':
_dim_input_jk = np.array(self.dims_feat)[np.array(self.idx_conv)+1].sum()
else:
_dim_input_jk = self.dims_feat[-1]
self.layer_jk = layers.JumpingKnowledge(self.arch_gcn,_dim_input_jk,mode=self.jk)
self.outputs = self.layer_jk([_outputs_l, self.idx_conv])
# OUPTUT LAYER
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
_dim_final = self.arch_gcn['dim'] if self.jk else self.dims_feat[-1]
self.layer_pred = layers.HighOrderAggregator(_dim_final,self.num_classes,act="I",\
order=0,dropout=self.placeholders["dropout"],bias="bias")
self.node_preds = self.layer_pred((self.outputs,None,None,None,None))
# BACK PROP
self._loss()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads_and_vars = self.optimizer.compute_gradients(self.loss)
clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
for grad, var in grads_and_vars]
self.grad, _ = clipped_grads_and_vars[0]
self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
self.preds = self.predict()
def _loss(self):
# these are all the trainable var
for aggregator in self.aggregators:
for var in aggregator.vars.values():
self.loss += self.weight_decay * tf.nn.l2_loss(var)
for var in self.layer_pred.vars.values():
self.loss += self.weight_decay * tf.nn.l2_loss(var)
# classification loss
f_loss = tf.nn.sigmoid_cross_entropy_with_logits if self.sigmoid_loss\
else tf.nn.softmax_cross_entropy_with_logits
# weighted loss due to bias in appearance of vertices
self.loss_terms = f_loss(logits=self.node_preds, labels=self.placeholders['labels'])
loss_terms_ndims = self.loss_terms.shape.ndims if self.loss_terms.shape is not None else None
if loss_terms_ndims == 1:
self.loss_terms = tf.reshape(self.loss_terms,(-1,1))
self._weight_loss_batch = tf.nn.embedding_lookup(self.norm_loss, self.node_subgraph)
_loss_terms_weight = tf.linalg.matmul(tf.transpose(self.loss_terms),\
tf.reshape(self._weight_loss_batch,(-1,1)))
self.loss += tf.reduce_sum(_loss_terms_weight)
tf.summary.scalar('loss', self.loss)
def predict(self):
return tf.nn.sigmoid(self.node_preds) if self.sigmoid_loss \
else tf.nn.softmax(self.node_preds)
def get_aggregators(self,name=None):
aggregators = []
for layer in range(self.num_layers):
aggregator = self.aggregator_cls(self.dims_weight[layer][0], self.dims_weight[layer][1],
dropout=self.placeholders['dropout'],name=name,
act=self.act_layer[layer],order=self.order_layer[layer],aggr=self.aggr_layer[layer],\
is_train=self.is_train,bias=self.bias_layer[layer],\
mulhead=self.mulhead)
aggregators.append(aggregator)
return aggregators
def aggregate_subgraph(self, batch_size=None, name=None, mode='train'):
if mode == 'train':
hidden = tf.nn.embedding_lookup(self.features, self.node_subgraph)
adj = self.adj_subgraph
else:
hidden = self.features
adj = self.adj_full_norm
ret_l = list()
_adj_partition_list = [self.adj_subgraph_0,self.adj_subgraph_1,self.adj_subgraph_2,self.adj_subgraph_3,
self.adj_subgraph_4,self.adj_subgraph_5,self.adj_subgraph_6,self.adj_subgraph_7]
if not args_global.dualGPU:
for layer in range(self.num_layers):
hidden = self.aggregators[layer]((hidden,adj,self.dims_feat[layer],_adj_partition_list,self.dim0_adj_sub))
ret_l.append(hidden)
else:
split=int(self.num_layers/2)
with tf.device('/gpu:0'):
for layer in range(split):
hidden = self.aggregators[layer]((hidden,adj,self.dims_feat[layer],_adj_partition_list,self.dim0_adj_sub))
ret_l.append(hidden)
with tf.device('/gpu:1'):
for layer in range(split,self.num_layers):
hidden = self.aggregators[layer]((hidden,adj,self.dims_feat[layer],_adj_partition_list,self.dim0_adj_sub))
ret_l.append(hidden)
return ret_l
|
<filename>graphsaint/tensorflow_version/model.py<gh_stars>100-1000
import tensorflow as tf
from graphsaint.globals import *
from graphsaint.tensorflow_version.inits import *
import graphsaint.tensorflow_version.layers as layers
from graphsaint.utils import *
import pdb
class GraphSAINT:
def __init__(self, num_classes, placeholders, features,
arch_gcn, train_params, adj_full_norm, **kwargs):
'''
Args:
- placeholders: TensorFlow placeholder object.
- features: Numpy array with node features.
- adj: Numpy array with adjacency lists (padded with random re-samples)
- degrees: Numpy array with node degrees.
- sigmoid_loss: Set to true if nodes can belong to multiple classes
'''
if "attention" in arch_gcn:
self.aggregator_cls = layers.AttentionAggregator
self.mulhead = int(arch_gcn['attention'])
else:
self.aggregator_cls = layers.HighOrderAggregator
self.mulhead = 1
self.lr = train_params['lr']
self.node_subgraph = placeholders['node_subgraph']
self.num_layers = len(arch_gcn['arch'].split('-'))
self.weight_decay = train_params['weight_decay']
self.jk = None if 'jk' not in arch_gcn else arch_gcn['jk']
self.arch_gcn = arch_gcn
self.adj_subgraph = placeholders['adj_subgraph']
# adj_subgraph_* are to store row-wise partitioned full graph adj tiles.
self.adj_subgraph_0=placeholders['adj_subgraph_0']
self.adj_subgraph_1=placeholders['adj_subgraph_1']
self.adj_subgraph_2=placeholders['adj_subgraph_2']
self.adj_subgraph_3=placeholders['adj_subgraph_3']
self.adj_subgraph_4=placeholders['adj_subgraph_4']
self.adj_subgraph_5=placeholders['adj_subgraph_5']
self.adj_subgraph_6=placeholders['adj_subgraph_6']
self.adj_subgraph_7=placeholders['adj_subgraph_7']
self.dim0_adj_sub = placeholders['dim0_adj_sub'] #adj_full_norm.shape[0]/8
self.features = tf.Variable(tf.constant(features, dtype=DTYPE), trainable=False)
self.dualGPU=args_global.dualGPU
_indices = np.column_stack(adj_full_norm.nonzero())
_data = adj_full_norm.data
_shape = adj_full_norm.shape
with tf.device('/cpu:0'):
self.adj_full_norm = tf.SparseTensorValue(_indices,_data,_shape)
self.num_classes = num_classes
self.sigmoid_loss = (arch_gcn['loss']=='sigmoid')
_dims,self.order_layer,self.act_layer,self.bias_layer,self.aggr_layer = parse_layer_yml(arch_gcn,features.shape[1])
# get layer index for each conv layer, useful for jk net last layer aggregation
self.set_idx_conv()
self.set_dims(_dims)
self.placeholders = placeholders
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.loss = 0
self.opt_op = None
self.norm_loss = placeholders['norm_loss']
self.is_train = placeholders['is_train']
self.build()
def set_dims(self,dims):
self.dims_feat = [dims[0]] + [((self.aggr_layer[l]=='concat')*self.order_layer[l]+1)*dims[l+1] for l in range(len(dims)-1)]
self.dims_weight = [(self.dims_feat[l],dims[l+1]) for l in range(len(dims)-1)]
def set_idx_conv(self):
idx_conv = np.where(np.array(self.order_layer)>=1)[0]
idx_conv = list(idx_conv[1:] - 1)
idx_conv.append(len(self.order_layer)-1)
_o_arr = np.array(self.order_layer)[idx_conv]
if np.prod(np.ediff1d(_o_arr)) == 0:
self.idx_conv = idx_conv
else:
self.idx_conv = list(np.where(np.array(self.order_layer)==1)[0])
def build(self):
"""
Build the sample graph with adj info in self.sample()
directly feed the sampled support vectors to tf placeholder
"""
self.aggregators = self.get_aggregators()
_outputs_l = self.aggregate_subgraph()
if self.jk == 'concat':
_dim_input_jk = np.array(self.dims_feat)[np.array(self.idx_conv)+1].sum()
else:
_dim_input_jk = self.dims_feat[-1]
self.layer_jk = layers.JumpingKnowledge(self.arch_gcn,_dim_input_jk,mode=self.jk)
self.outputs = self.layer_jk([_outputs_l, self.idx_conv])
# OUPTUT LAYER
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
_dim_final = self.arch_gcn['dim'] if self.jk else self.dims_feat[-1]
self.layer_pred = layers.HighOrderAggregator(_dim_final,self.num_classes,act="I",\
order=0,dropout=self.placeholders["dropout"],bias="bias")
self.node_preds = self.layer_pred((self.outputs,None,None,None,None))
# BACK PROP
self._loss()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads_and_vars = self.optimizer.compute_gradients(self.loss)
clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
for grad, var in grads_and_vars]
self.grad, _ = clipped_grads_and_vars[0]
self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
self.preds = self.predict()
def _loss(self):
# these are all the trainable var
for aggregator in self.aggregators:
for var in aggregator.vars.values():
self.loss += self.weight_decay * tf.nn.l2_loss(var)
for var in self.layer_pred.vars.values():
self.loss += self.weight_decay * tf.nn.l2_loss(var)
# classification loss
f_loss = tf.nn.sigmoid_cross_entropy_with_logits if self.sigmoid_loss\
else tf.nn.softmax_cross_entropy_with_logits
# weighted loss due to bias in appearance of vertices
self.loss_terms = f_loss(logits=self.node_preds, labels=self.placeholders['labels'])
loss_terms_ndims = self.loss_terms.shape.ndims if self.loss_terms.shape is not None else None
if loss_terms_ndims == 1:
self.loss_terms = tf.reshape(self.loss_terms,(-1,1))
self._weight_loss_batch = tf.nn.embedding_lookup(self.norm_loss, self.node_subgraph)
_loss_terms_weight = tf.linalg.matmul(tf.transpose(self.loss_terms),\
tf.reshape(self._weight_loss_batch,(-1,1)))
self.loss += tf.reduce_sum(_loss_terms_weight)
tf.summary.scalar('loss', self.loss)
def predict(self):
return tf.nn.sigmoid(self.node_preds) if self.sigmoid_loss \
else tf.nn.softmax(self.node_preds)
def get_aggregators(self,name=None):
aggregators = []
for layer in range(self.num_layers):
aggregator = self.aggregator_cls(self.dims_weight[layer][0], self.dims_weight[layer][1],
dropout=self.placeholders['dropout'],name=name,
act=self.act_layer[layer],order=self.order_layer[layer],aggr=self.aggr_layer[layer],\
is_train=self.is_train,bias=self.bias_layer[layer],\
mulhead=self.mulhead)
aggregators.append(aggregator)
return aggregators
def aggregate_subgraph(self, batch_size=None, name=None, mode='train'):
if mode == 'train':
hidden = tf.nn.embedding_lookup(self.features, self.node_subgraph)
adj = self.adj_subgraph
else:
hidden = self.features
adj = self.adj_full_norm
ret_l = list()
_adj_partition_list = [self.adj_subgraph_0,self.adj_subgraph_1,self.adj_subgraph_2,self.adj_subgraph_3,
self.adj_subgraph_4,self.adj_subgraph_5,self.adj_subgraph_6,self.adj_subgraph_7]
if not args_global.dualGPU:
for layer in range(self.num_layers):
hidden = self.aggregators[layer]((hidden,adj,self.dims_feat[layer],_adj_partition_list,self.dim0_adj_sub))
ret_l.append(hidden)
else:
split=int(self.num_layers/2)
with tf.device('/gpu:0'):
for layer in range(split):
hidden = self.aggregators[layer]((hidden,adj,self.dims_feat[layer],_adj_partition_list,self.dim0_adj_sub))
ret_l.append(hidden)
with tf.device('/gpu:1'):
for layer in range(split,self.num_layers):
hidden = self.aggregators[layer]((hidden,adj,self.dims_feat[layer],_adj_partition_list,self.dim0_adj_sub))
ret_l.append(hidden)
return ret_l
|
en
| 0.778394
|
Args:
- placeholders: TensorFlow placeholder object.
- features: Numpy array with node features.
- adj: Numpy array with adjacency lists (padded with random re-samples)
- degrees: Numpy array with node degrees.
- sigmoid_loss: Set to true if nodes can belong to multiple classes # adj_subgraph_* are to store row-wise partitioned full graph adj tiles. #adj_full_norm.shape[0]/8 # get layer index for each conv layer, useful for jk net last layer aggregation Build the sample graph with adj info in self.sample()
directly feed the sampled support vectors to tf placeholder # OUPTUT LAYER # BACK PROP # these are all the trainable var # classification loss # weighted loss due to bias in appearance of vertices
| 2.30439
| 2
|
examples/server.py
|
insomniacslk/fbtftp
| 1
|
6625902
|
#!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE-examples file in the root directory of this source tree.
import argparse
import logging
import os
from fbtftp.base_handler import BaseHandler
from fbtftp.base_handler import ResponseData
from fbtftp.base_server import BaseServer
class FileResponseData(ResponseData):
def __init__(self, path):
self._size = os.stat(path).st_size
self._reader = open(path, 'rb')
def read(self, n):
return self._reader.read(n)
def size(self):
return self._size
def close(self):
self._reader.close()
def print_session_stats(stats):
logging.info('Stats: for %r requesting %r' % (stats.peer, stats.file_path))
logging.info('Error: %r' % stats.error)
logging.info('Time spent: %dms' % (stats.duration() * 1e3))
logging.info('Packets sent: %d' % stats.packets_sent)
logging.info('Packets ACKed: %d' % stats.packets_acked)
logging.info('Bytes sent: %d' % stats.bytes_sent)
logging.info('Options: %r' % stats.options)
logging.info('Blksize: %r' % stats.blksize)
logging.info('Retransmits: %d' % stats.retransmits)
logging.info('Server port: %d' % stats.server_addr[1])
logging.info('Client port: %d' % stats.peer[1])
def print_server_stats(stats):
'''
Print server stats - see the ServerStats class
'''
# NOTE: remember to reset the counters you use, to allow the next cycle to
# start fresh
counters = stats.get_and_reset_all_counters()
logging.info('Server stats - every %d seconds' % stats.interval)
if 'process_count' in counters:
logging.info(
'Number of spawned TFTP workers in stats time frame : %d' %
counters['process_count']
)
class StaticHandler(BaseHandler):
def __init__(self, server_addr, peer, path, options, root, stats_callback):
self._root = root
super().__init__(server_addr, peer, path, options, stats_callback)
def get_response_data(self):
return FileResponseData(os.path.join(self._root, self._path))
class StaticServer(BaseServer):
def __init__(
self,
address,
port,
retries,
timeout,
root,
handler_stats_callback,
server_stats_callback=None
):
self._root = root
self._handler_stats_callback = handler_stats_callback
super().__init__(address, port, retries, timeout, server_stats_callback)
def get_handler(self, server_addr, peer, path, options):
return StaticHandler(
server_addr, peer, path, options, self._root,
self._handler_stats_callback
)
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--ip',
type=str,
default='::',
help='IP address to bind to'
)
parser.add_argument(
'--port',
type=int,
default=1969,
help='port to bind to'
)
parser.add_argument(
'--retries',
type=int,
default=5,
help='number of per-packet retries'
)
parser.add_argument(
'--timeout_s',
type=int,
default=2,
help='timeout for packet retransmission'
)
parser.add_argument(
'--root',
type=str,
default='',
help='root of the static filesystem'
)
return parser.parse_args()
def main():
args = get_arguments()
logging.getLogger().setLevel(logging.DEBUG)
server = StaticServer(
args.ip,
args.port,
args.retries,
args.timeout_s,
args.root,
print_session_stats,
print_server_stats,
)
try:
server.run()
except KeyboardInterrupt:
server.close()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE-examples file in the root directory of this source tree.
import argparse
import logging
import os
from fbtftp.base_handler import BaseHandler
from fbtftp.base_handler import ResponseData
from fbtftp.base_server import BaseServer
class FileResponseData(ResponseData):
def __init__(self, path):
self._size = os.stat(path).st_size
self._reader = open(path, 'rb')
def read(self, n):
return self._reader.read(n)
def size(self):
return self._size
def close(self):
self._reader.close()
def print_session_stats(stats):
logging.info('Stats: for %r requesting %r' % (stats.peer, stats.file_path))
logging.info('Error: %r' % stats.error)
logging.info('Time spent: %dms' % (stats.duration() * 1e3))
logging.info('Packets sent: %d' % stats.packets_sent)
logging.info('Packets ACKed: %d' % stats.packets_acked)
logging.info('Bytes sent: %d' % stats.bytes_sent)
logging.info('Options: %r' % stats.options)
logging.info('Blksize: %r' % stats.blksize)
logging.info('Retransmits: %d' % stats.retransmits)
logging.info('Server port: %d' % stats.server_addr[1])
logging.info('Client port: %d' % stats.peer[1])
def print_server_stats(stats):
'''
Print server stats - see the ServerStats class
'''
# NOTE: remember to reset the counters you use, to allow the next cycle to
# start fresh
counters = stats.get_and_reset_all_counters()
logging.info('Server stats - every %d seconds' % stats.interval)
if 'process_count' in counters:
logging.info(
'Number of spawned TFTP workers in stats time frame : %d' %
counters['process_count']
)
class StaticHandler(BaseHandler):
def __init__(self, server_addr, peer, path, options, root, stats_callback):
self._root = root
super().__init__(server_addr, peer, path, options, stats_callback)
def get_response_data(self):
return FileResponseData(os.path.join(self._root, self._path))
class StaticServer(BaseServer):
def __init__(
self,
address,
port,
retries,
timeout,
root,
handler_stats_callback,
server_stats_callback=None
):
self._root = root
self._handler_stats_callback = handler_stats_callback
super().__init__(address, port, retries, timeout, server_stats_callback)
def get_handler(self, server_addr, peer, path, options):
return StaticHandler(
server_addr, peer, path, options, self._root,
self._handler_stats_callback
)
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--ip',
type=str,
default='::',
help='IP address to bind to'
)
parser.add_argument(
'--port',
type=int,
default=1969,
help='port to bind to'
)
parser.add_argument(
'--retries',
type=int,
default=5,
help='number of per-packet retries'
)
parser.add_argument(
'--timeout_s',
type=int,
default=2,
help='timeout for packet retransmission'
)
parser.add_argument(
'--root',
type=str,
default='',
help='root of the static filesystem'
)
return parser.parse_args()
def main():
args = get_arguments()
logging.getLogger().setLevel(logging.DEBUG)
server = StaticServer(
args.ip,
args.port,
args.retries,
args.timeout_s,
args.root,
print_session_stats,
print_server_stats,
)
try:
server.run()
except KeyboardInterrupt:
server.close()
if __name__ == '__main__':
main()
|
en
| 0.817262
|
#!/usr/bin/env python3 # Copyright 2016-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE-examples file in the root directory of this source tree. Print server stats - see the ServerStats class # NOTE: remember to reset the counters you use, to allow the next cycle to # start fresh
| 2.182776
| 2
|
systemd_helper.py
|
CyberKoo/ClashMagicRouter
| 2
|
6625903
|
<gh_stars>1-10
import os
from command_helper import CommandHelper
class SystemdHelper:
@staticmethod
def service_exists(name):
search_path = SystemdHelper._get_search_path()
for path in search_path:
if os.path.exists('{}/{}.service'.format(path, name)):
return True
return False
@staticmethod
def _get_search_path():
try:
return CommandHelper.run_command('systemd-analyze', '--system unit-paths').split()
except:
return ['/usr/lib/systemd/system/', '/lib/systemd/system/', '/etc/systemd/system/']
|
import os
from command_helper import CommandHelper
class SystemdHelper:
@staticmethod
def service_exists(name):
search_path = SystemdHelper._get_search_path()
for path in search_path:
if os.path.exists('{}/{}.service'.format(path, name)):
return True
return False
@staticmethod
def _get_search_path():
try:
return CommandHelper.run_command('systemd-analyze', '--system unit-paths').split()
except:
return ['/usr/lib/systemd/system/', '/lib/systemd/system/', '/etc/systemd/system/']
|
none
| 1
| 2.710327
| 3
|
|
tests/test_claircli.py
|
joelee2012/claircli
| 16
|
6625904
|
<filename>tests/test_claircli.py
import json
import logging
import os
import shutil
import unittest
from argparse import Namespace
from collections import defaultdict
from os.path import isdir, isfile
import responses
from requests import get as req_get
from six.moves.urllib.parse import quote, urlencode
from claircli.clair import Clair
from claircli.cli import ClairCli
from claircli.docker_image import Image
from claircli.docker_registry import LocalRegistry, RemoteRegistry
from claircli.report import Report, WhiteList
try:
from unittest.mock import patch
except:
from mock import patch
logger = logging.getLogger(__name__)
class ClairCmdTestBase(unittest.TestCase):
def setUp(self):
self.name = 'registry.example.com/org/image-name:version'
self.reg = 'registry.example.com'
self.repo = 'org/image-name'
self.tag = 'version'
self.reg_url = 'https://%s/v2/' % self.reg
self.token_url = self.reg_url + 'token'
auth = 'Bearer realm="%s",service="%s"' % (self.token_url, self.reg)
self.headers = {'WWW-Authenticate': auth}
self.clair_url = 'http://mock_clair:6060'
params = {'service': self.reg,
'client_id': 'claircli',
'scope': 'repository:%s:pull' % self.repo}
self.token_url = self.token_url + '?' + urlencode(params)
self.manifest_url = self.reg_url + 'org/image-name/manifests/version'
responses.add(responses.GET, self.reg_url,
json={'message': 'authentication required'},
status=401, headers=self.headers)
responses.add(responses.GET, self.token_url,
json={'token': 'test-token'}, status=200)
with open('tests/test_data/manifest.v2.json') as f:
self.manifest = json.load(f)
responses.add(responses.GET, self.manifest_url,
json=self.manifest, status=200)
self.v1_analyze_url = '%s/v1/layers' % self.clair_url
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
with open('tests/test_data/origin_vulnerabilities.json') as f:
self.origin_data = json.load(f)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
self.html = Report.get_report_path(self.name, '.html')
def tearDown(self):
RemoteRegistry.tokens = defaultdict(dict)
# if isfile(self.html):
# os.remove(self.html)
def assert_called_with_url(self):
self.assertEqual(responses.calls[0].request.url, self.reg_url)
self.assertEqual(
responses.calls[1].request.url, self.token_url)
self.assertEqual(
responses.calls[2].request.url, self.manifest_url)
def mock_docker_client(mock_docker):
mock_client = mock_docker.return_value
mock_image = mock_client.images.get.return_value
mock_image.save.return_value = open('tests/test_data/manifest.tar', 'r+b')
return mock_docker
class TestImage(ClairCmdTestBase):
def test_parse_image(self):
with open('tests/test_data/images.json') as f:
images = json.load(f)
for expected in images:
image = Image(expected['name'])
self.assertEqual(image.name, expected['name'])
self.assertEqual(image.repository, expected['repository'])
self.assertEqual(image.tag, expected['tag'])
self.assertEqual(str(image.registry), expected['registry'])
@responses.activate
def test_manifest(self):
image = Image(self.name)
self.assertEqual(image.manifest, self.manifest)
self.assert_called_with_url()
@responses.activate
def test_list_manifest(self):
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
image = Image(self.name)
self.assertEqual(image.manifest, list_manifest)
self.assert_called_with_url()
@responses.activate
def test_unsupported_manifest(self):
with open('tests/test_data/manifest.unsupported.json') as f:
manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=manifest, status=200)
with self.assertRaises(ValueError):
image = Image(self.name)
image.layers
@patch('docker.from_env')
def test_manifest_local(self, mock_docker):
mock_docker_client(mock_docker)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
self.assertEqual(image.manifest, manifest)
@patch('docker.from_env')
def test_layers_local(self, mock_docker):
mock_docker_client(mock_docker)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
self.assertEqual(image.layers, [e.replace(
'/layer.tar', '') for e in manifest[0]['Layers']])
@responses.activate
def test_layers_v1(self):
with open('tests/test_data/manifest.v1.json') as f:
manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=manifest, status=200)
image = Image(self.name)
self.assertEqual(image.layers, [e['blobSum']
for e in manifest['fsLayers']][::-1])
self.assert_called_with_url()
@responses.activate
def test_layers_v2(self):
image = Image(self.name)
self.assertEqual(image.layers,
[e['digest'] for e in self.manifest['layers']])
self.assert_called_with_url()
@responses.activate
def test_layers_list_v2(self):
list_image_manifest_url = self.reg_url + \
'org/image-name/manifests/sha256:d0fec089e611891a03f3282f10115bb186ed46093c3f083eceb250cee64b63eb'
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
with open('tests/test_data/manifest.list.v2-image.json') as f:
list_image_manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
responses.add(responses.GET, list_image_manifest_url,
json=list_image_manifest, status=200)
image = Image(self.name)
self.assertEqual(image.images[0].layers, [e['digest']
for e in list_image_manifest['layers']])
self.assertEqual(image.layers, [])
self.assert_called_with_url()
self.assertEqual(
responses.calls[3].request.url, list_image_manifest_url)
class TestClair(ClairCmdTestBase):
@responses.activate
def test_analyze_remote_image(self):
clair = Clair(self.clair_url)
image = Image(self.name)
layers = clair.analyze_image(image)
self.assertEqual(layers, self.layers)
self.assert_called_with_url()
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertEqual(req_body['Layer']['Path'],
image.registry.get_blobs_url(image, layer))
@patch('docker.from_env')
@responses.activate
def test_analyze_local_image(self, mock_docker):
mock_docker_client(mock_docker)
clair = Clair(self.clair_url)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, image.layers[0]))
layers = clair.analyze_image(image)
self.assertEqual(layers, image.layers)
for index, layer in enumerate(layers, start=1):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertEqual(req_body['Layer']['Path'],
image.registry.get_blobs_url(image, layer))
class TestClairCli(ClairCmdTestBase):
def test_read_white_list(self):
white_list = WhiteList('tests/test_data/example-whitelist.yaml')
self.assertEqual(white_list.get('common'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText'})
self.assertEqual(white_list.get('alpine'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText',
'CVE-2017-3261': 'SE'})
self.assertEqual(white_list.get('ubuntu'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText',
'CVE-2017-5230': 'XSX'})
self.assertEqual(white_list.get('centos'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText'})
@responses.activate
def test_analyze_images(self):
with patch('sys.argv', ['claircli', '-d', '-c',
self.clair_url, self.name]):
cli = ClairCli()
cli.run()
self.assert_called_with_url()
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
@responses.activate
def test_analyze_images_in_insecure_registry(self):
reg_url = 'http://%s/v2/' % self.reg
token_url = reg_url + 'token'
auth = 'Bearer realm="%s",service="%s"' % (token_url, self.reg)
headers = {'WWW-Authenticate': auth}
params = {'service': self.reg,
'client_id': 'claircli',
'scope': 'repository:%s:pull' % self.repo}
token_url = token_url + '?' + urlencode(params)
manifest_url = reg_url + 'org/image-name/manifests/version'
responses.reset()
responses.add(responses.GET, reg_url,
json={'message': 'authentication required'},
status=401, headers=headers)
responses.add(responses.GET, token_url,
json={'token': 'test-token'}, status=200)
responses.add(responses.GET, manifest_url,
json=self.manifest, status=200)
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
with patch('sys.argv', ['claircli', '-c',
self.clair_url, '-i', self.reg, self.name]):
cli = ClairCli()
cli.run()
for index, url in enumerate([reg_url, token_url, manifest_url]):
self.assertEqual(responses.calls[index].request.url, url)
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
self.assertIn(self.reg, RemoteRegistry.insec_regs)
@responses.activate
def test_analyze_images_in_secure_registry(self):
reg_url = 'https://%s/v2/' % self.reg
token = 'just-<PASSWORD>'
auth = 'Basic %s' % token
headers = {'WWW-Authenticate': auth}
manifest_url = reg_url + 'org/image-name/manifests/version'
responses.reset()
responses.add(responses.GET, manifest_url,
json=self.manifest, status=200, headers=headers)
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
with patch('sys.argv', ['claircli', '-c',
self.clair_url,
'-k', self.reg + ':' + token,
# Include a check for ignored arguments
'-k', '1234', '-k', 'ab:', '-k', ':',
self.name]):
cli = ClairCli()
cli.run()
for index, url in enumerate([manifest_url, ]):
self.assertEqual(responses.calls[index].request.url, url)
for index, layer in enumerate(self.layers, start=2):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
self.assertEqual(0, len(RemoteRegistry.insec_regs))
self.assertIn(self.reg, RemoteRegistry.tokens)
self.assertIn('', RemoteRegistry.tokens[self.reg])
self.assertEqual(auth, RemoteRegistry.tokens[self.reg][''])
self.assertIn(self.repo, RemoteRegistry.tokens[self.reg])
self.assertEqual(auth, RemoteRegistry.tokens[self.reg][self.repo])
@patch('docker.from_env')
@responses.activate
def test_analyze_local_images(self, mock_docker):
mock_docker_client(mock_docker)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
layers = [e.replace('/layer.tar', '') for e in manifest[0]['Layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, layers[0]))
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, layers[-1]), json=self.origin_data)
with patch('sys.argv', ['claircli', '-l', 'localhost',
'-c', self.clair_url, self.name]):
cli = ClairCli()
cli.run()
for index, layer in enumerate(layers, start=1):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
@responses.activate
def test_analyze_manifest_list(self):
list_image_manifest_url = self.reg_url + \
'org/image-name/manifests/sha256:d0fec089e611891a03f3282f10115bb186ed46093c3f083eceb250cee64b63eb'
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
with open('tests/test_data/manifest.list.v2-image.json') as f:
list_image_manifest = json.load(f)
with open('tests/test_data/origin_vulnerabilities_list.json') as f:
list_origin_data = json.load(f)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, list_origin_data['Layer']['Name']),
json=list_origin_data)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
responses.add(responses.GET, list_image_manifest_url,
json=list_image_manifest, status=200)
layers = [e['digest'] for e in list_image_manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, layers[0]))
for layer in layers:
responses.add(responses.GET, '%s/%s' %
(self.v1_analyze_url, layer))
with patch('sys.argv', ['claircli', '-d', '-c',
self.clair_url, self.name]):
cli = ClairCli()
cli.run()
image = Image(self.name)
self.assert_called_with_url()
for index, layer in enumerate(image.images[0].layers, start=5):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.html = Report.get_report_path('{}/{}@{}'.format(self.reg, self.repo, image.manifest['manifests'][0]['digest']), '.html')
self.assertTrue(isfile(self.html))
|
<filename>tests/test_claircli.py
import json
import logging
import os
import shutil
import unittest
from argparse import Namespace
from collections import defaultdict
from os.path import isdir, isfile
import responses
from requests import get as req_get
from six.moves.urllib.parse import quote, urlencode
from claircli.clair import Clair
from claircli.cli import ClairCli
from claircli.docker_image import Image
from claircli.docker_registry import LocalRegistry, RemoteRegistry
from claircli.report import Report, WhiteList
try:
from unittest.mock import patch
except:
from mock import patch
logger = logging.getLogger(__name__)
class ClairCmdTestBase(unittest.TestCase):
def setUp(self):
self.name = 'registry.example.com/org/image-name:version'
self.reg = 'registry.example.com'
self.repo = 'org/image-name'
self.tag = 'version'
self.reg_url = 'https://%s/v2/' % self.reg
self.token_url = self.reg_url + 'token'
auth = 'Bearer realm="%s",service="%s"' % (self.token_url, self.reg)
self.headers = {'WWW-Authenticate': auth}
self.clair_url = 'http://mock_clair:6060'
params = {'service': self.reg,
'client_id': 'claircli',
'scope': 'repository:%s:pull' % self.repo}
self.token_url = self.token_url + '?' + urlencode(params)
self.manifest_url = self.reg_url + 'org/image-name/manifests/version'
responses.add(responses.GET, self.reg_url,
json={'message': 'authentication required'},
status=401, headers=self.headers)
responses.add(responses.GET, self.token_url,
json={'token': 'test-token'}, status=200)
with open('tests/test_data/manifest.v2.json') as f:
self.manifest = json.load(f)
responses.add(responses.GET, self.manifest_url,
json=self.manifest, status=200)
self.v1_analyze_url = '%s/v1/layers' % self.clair_url
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
with open('tests/test_data/origin_vulnerabilities.json') as f:
self.origin_data = json.load(f)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
self.html = Report.get_report_path(self.name, '.html')
def tearDown(self):
RemoteRegistry.tokens = defaultdict(dict)
# if isfile(self.html):
# os.remove(self.html)
def assert_called_with_url(self):
self.assertEqual(responses.calls[0].request.url, self.reg_url)
self.assertEqual(
responses.calls[1].request.url, self.token_url)
self.assertEqual(
responses.calls[2].request.url, self.manifest_url)
def mock_docker_client(mock_docker):
mock_client = mock_docker.return_value
mock_image = mock_client.images.get.return_value
mock_image.save.return_value = open('tests/test_data/manifest.tar', 'r+b')
return mock_docker
class TestImage(ClairCmdTestBase):
def test_parse_image(self):
with open('tests/test_data/images.json') as f:
images = json.load(f)
for expected in images:
image = Image(expected['name'])
self.assertEqual(image.name, expected['name'])
self.assertEqual(image.repository, expected['repository'])
self.assertEqual(image.tag, expected['tag'])
self.assertEqual(str(image.registry), expected['registry'])
@responses.activate
def test_manifest(self):
image = Image(self.name)
self.assertEqual(image.manifest, self.manifest)
self.assert_called_with_url()
@responses.activate
def test_list_manifest(self):
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
image = Image(self.name)
self.assertEqual(image.manifest, list_manifest)
self.assert_called_with_url()
@responses.activate
def test_unsupported_manifest(self):
with open('tests/test_data/manifest.unsupported.json') as f:
manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=manifest, status=200)
with self.assertRaises(ValueError):
image = Image(self.name)
image.layers
@patch('docker.from_env')
def test_manifest_local(self, mock_docker):
mock_docker_client(mock_docker)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
self.assertEqual(image.manifest, manifest)
@patch('docker.from_env')
def test_layers_local(self, mock_docker):
mock_docker_client(mock_docker)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
self.assertEqual(image.layers, [e.replace(
'/layer.tar', '') for e in manifest[0]['Layers']])
@responses.activate
def test_layers_v1(self):
with open('tests/test_data/manifest.v1.json') as f:
manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=manifest, status=200)
image = Image(self.name)
self.assertEqual(image.layers, [e['blobSum']
for e in manifest['fsLayers']][::-1])
self.assert_called_with_url()
@responses.activate
def test_layers_v2(self):
image = Image(self.name)
self.assertEqual(image.layers,
[e['digest'] for e in self.manifest['layers']])
self.assert_called_with_url()
@responses.activate
def test_layers_list_v2(self):
list_image_manifest_url = self.reg_url + \
'org/image-name/manifests/sha256:d0fec089e611891a03f3282f10115bb186ed46093c3f083eceb250cee64b63eb'
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
with open('tests/test_data/manifest.list.v2-image.json') as f:
list_image_manifest = json.load(f)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
responses.add(responses.GET, list_image_manifest_url,
json=list_image_manifest, status=200)
image = Image(self.name)
self.assertEqual(image.images[0].layers, [e['digest']
for e in list_image_manifest['layers']])
self.assertEqual(image.layers, [])
self.assert_called_with_url()
self.assertEqual(
responses.calls[3].request.url, list_image_manifest_url)
class TestClair(ClairCmdTestBase):
@responses.activate
def test_analyze_remote_image(self):
clair = Clair(self.clair_url)
image = Image(self.name)
layers = clair.analyze_image(image)
self.assertEqual(layers, self.layers)
self.assert_called_with_url()
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertEqual(req_body['Layer']['Path'],
image.registry.get_blobs_url(image, layer))
@patch('docker.from_env')
@responses.activate
def test_analyze_local_image(self, mock_docker):
mock_docker_client(mock_docker)
clair = Clair(self.clair_url)
registry = LocalRegistry('localhost')
image = Image(self.name, registry)
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, image.layers[0]))
layers = clair.analyze_image(image)
self.assertEqual(layers, image.layers)
for index, layer in enumerate(layers, start=1):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertEqual(req_body['Layer']['Path'],
image.registry.get_blobs_url(image, layer))
class TestClairCli(ClairCmdTestBase):
def test_read_white_list(self):
white_list = WhiteList('tests/test_data/example-whitelist.yaml')
self.assertEqual(white_list.get('common'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText'})
self.assertEqual(white_list.get('alpine'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText',
'CVE-2017-3261': 'SE'})
self.assertEqual(white_list.get('ubuntu'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText',
'CVE-2017-5230': 'XSX'})
self.assertEqual(white_list.get('centos'), {
'CVE-2017-6055': 'XML',
'CVE-2017-5586': 'OpenText'})
@responses.activate
def test_analyze_images(self):
with patch('sys.argv', ['claircli', '-d', '-c',
self.clair_url, self.name]):
cli = ClairCli()
cli.run()
self.assert_called_with_url()
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
@responses.activate
def test_analyze_images_in_insecure_registry(self):
reg_url = 'http://%s/v2/' % self.reg
token_url = reg_url + 'token'
auth = 'Bearer realm="%s",service="%s"' % (token_url, self.reg)
headers = {'WWW-Authenticate': auth}
params = {'service': self.reg,
'client_id': 'claircli',
'scope': 'repository:%s:pull' % self.repo}
token_url = token_url + '?' + urlencode(params)
manifest_url = reg_url + 'org/image-name/manifests/version'
responses.reset()
responses.add(responses.GET, reg_url,
json={'message': 'authentication required'},
status=401, headers=headers)
responses.add(responses.GET, token_url,
json={'token': 'test-token'}, status=200)
responses.add(responses.GET, manifest_url,
json=self.manifest, status=200)
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
with patch('sys.argv', ['claircli', '-c',
self.clair_url, '-i', self.reg, self.name]):
cli = ClairCli()
cli.run()
for index, url in enumerate([reg_url, token_url, manifest_url]):
self.assertEqual(responses.calls[index].request.url, url)
for index, layer in enumerate(self.layers, start=4):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
self.assertIn(self.reg, RemoteRegistry.insec_regs)
@responses.activate
def test_analyze_images_in_secure_registry(self):
reg_url = 'https://%s/v2/' % self.reg
token = 'just-<PASSWORD>'
auth = 'Basic %s' % token
headers = {'WWW-Authenticate': auth}
manifest_url = reg_url + 'org/image-name/manifests/version'
responses.reset()
responses.add(responses.GET, manifest_url,
json=self.manifest, status=200, headers=headers)
self.layers = [e['digest'] for e in self.manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, self.layers[0]))
responses.add(responses.POST, self.v1_analyze_url)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, self.layers[-1]),
json=self.origin_data)
with patch('sys.argv', ['claircli', '-c',
self.clair_url,
'-k', self.reg + ':' + token,
# Include a check for ignored arguments
'-k', '1234', '-k', 'ab:', '-k', ':',
self.name]):
cli = ClairCli()
cli.run()
for index, url in enumerate([manifest_url, ]):
self.assertEqual(responses.calls[index].request.url, url)
for index, layer in enumerate(self.layers, start=2):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
self.assertEqual(0, len(RemoteRegistry.insec_regs))
self.assertIn(self.reg, RemoteRegistry.tokens)
self.assertIn('', RemoteRegistry.tokens[self.reg])
self.assertEqual(auth, RemoteRegistry.tokens[self.reg][''])
self.assertIn(self.repo, RemoteRegistry.tokens[self.reg])
self.assertEqual(auth, RemoteRegistry.tokens[self.reg][self.repo])
@patch('docker.from_env')
@responses.activate
def test_analyze_local_images(self, mock_docker):
mock_docker_client(mock_docker)
with open('tests/test_data/manifest.json') as file_:
manifest = json.load(file_)
layers = [e.replace('/layer.tar', '') for e in manifest[0]['Layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, layers[0]))
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, layers[-1]), json=self.origin_data)
with patch('sys.argv', ['claircli', '-l', 'localhost',
'-c', self.clair_url, self.name]):
cli = ClairCli()
cli.run()
for index, layer in enumerate(layers, start=1):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.assertTrue(isfile(self.html))
@responses.activate
def test_analyze_manifest_list(self):
list_image_manifest_url = self.reg_url + \
'org/image-name/manifests/sha256:d0fec089e611891a03f3282f10115bb186ed46093c3f083eceb250cee64b63eb'
with open('tests/test_data/manifest.list.v2.json') as f:
list_manifest = json.load(f)
with open('tests/test_data/manifest.list.v2-image.json') as f:
list_image_manifest = json.load(f)
with open('tests/test_data/origin_vulnerabilities_list.json') as f:
list_origin_data = json.load(f)
responses.add(responses.GET, '%s/%s?features&vulnerabilities' %
(self.v1_analyze_url, list_origin_data['Layer']['Name']),
json=list_origin_data)
responses.replace(responses.GET, self.manifest_url,
json=list_manifest, status=200)
responses.add(responses.GET, list_image_manifest_url,
json=list_image_manifest, status=200)
layers = [e['digest'] for e in list_image_manifest['layers']]
responses.add(responses.DELETE, '%s/%s' %
(self.v1_analyze_url, layers[0]))
for layer in layers:
responses.add(responses.GET, '%s/%s' %
(self.v1_analyze_url, layer))
with patch('sys.argv', ['claircli', '-d', '-c',
self.clair_url, self.name]):
cli = ClairCli()
cli.run()
image = Image(self.name)
self.assert_called_with_url()
for index, layer in enumerate(image.images[0].layers, start=5):
self.assertEqual(
responses.calls[index].request.url, self.v1_analyze_url)
req_body = json.loads(responses.calls[index].request.body)
self.assertEqual(req_body['Layer']['Name'], layer)
self.html = Report.get_report_path('{}/{}@{}'.format(self.reg, self.repo, image.manifest['manifests'][0]['digest']), '.html')
self.assertTrue(isfile(self.html))
|
en
| 0.153258
|
# if isfile(self.html): # os.remove(self.html) # Include a check for ignored arguments
| 2.043989
| 2
|
locustfiles/loadtest.py
|
18F/identity-give-load-testin
| 0
|
6625905
|
<gh_stars>0
""" Locustfile for load testing IDVA functionality """
import os
from locust import HttpUser, task, constant_pacing, tag
HTTP_FLOW_PATH = f"/v1/company/wdK3fH48XuoXzvZyeNJEYFA9i8K72BZg/flows/IU1iDIvviIth5jiYmNvgsS43Kg29RxyB/start"
SK_API_KEY = os.getenv("SK_API_KEY")
class SKLoadTestUser(HttpUser):
"""Load test SK"""
wait_time = constant_pacing(1)
@task(1)
@tag("basic_load")
def test_flow(self):
"""Invoke basic sk test flow"""
self.client.post(HTTP_FLOW_PATH, headers={"x-sk-api-key": SK_API_KEY})
|
""" Locustfile for load testing IDVA functionality """
import os
from locust import HttpUser, task, constant_pacing, tag
HTTP_FLOW_PATH = f"/v1/company/wdK3fH48XuoXzvZyeNJEYFA9i8K72BZg/flows/IU1iDIvviIth5jiYmNvgsS43Kg29RxyB/start"
SK_API_KEY = os.getenv("SK_API_KEY")
class SKLoadTestUser(HttpUser):
"""Load test SK"""
wait_time = constant_pacing(1)
@task(1)
@tag("basic_load")
def test_flow(self):
"""Invoke basic sk test flow"""
self.client.post(HTTP_FLOW_PATH, headers={"x-sk-api-key": SK_API_KEY})
|
en
| 0.416422
|
Locustfile for load testing IDVA functionality Load test SK Invoke basic sk test flow
| 2.339033
| 2
|
leetcode/python/maximumDepthOfBinaryTree.py
|
yaoxuanw007/forfun
| 0
|
6625906
|
<gh_stars>0
# https://oj.leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# 11:02 - 11:08
class Solution:
# @param root, a tree node
# @return an integer
def maxDepth(self, root):
if root == None:
return 0
self.result = -1
self.getMaxDepth(root, 0)
return self.result
def getMaxDepth(self, root, depth):
if root == None:
return
depth += 1
if root.left == None and root.right == None:
if self.result < 0 or self.result < depth:
self.result = depth
return
self.getMaxDepth(root.left, depth)
self.getMaxDepth(root.right, depth)
s = Solution()
root = None
print s.maxDepth(root)
root = TreeNode(1)
root.left = TreeNode(1)
print s.maxDepth(root)
|
# https://oj.leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# 11:02 - 11:08
class Solution:
# @param root, a tree node
# @return an integer
def maxDepth(self, root):
if root == None:
return 0
self.result = -1
self.getMaxDepth(root, 0)
return self.result
def getMaxDepth(self, root, depth):
if root == None:
return
depth += 1
if root.left == None and root.right == None:
if self.result < 0 or self.result < depth:
self.result = depth
return
self.getMaxDepth(root.left, depth)
self.getMaxDepth(root.right, depth)
s = Solution()
root = None
print s.maxDepth(root)
root = TreeNode(1)
root.left = TreeNode(1)
print s.maxDepth(root)
|
en
| 0.561996
|
# https://oj.leetcode.com/problems/maximum-depth-of-binary-tree/ # Definition for a binary tree node # 11:02 - 11:08 # @param root, a tree node # @return an integer
| 3.888313
| 4
|
mundo_1/ex006.py
|
tseiiti/curso_em_video
| 0
|
6625907
|
from os import system, name
# Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere.
system('cls' if name == 'nt' else 'clear')
# Challenge statement (Portuguese), kept verbatim; note it is assigned
# but never displayed by this script.
dsc = ('''DESAFIO 006:
Faça um algoritmo que leia um número e mostre na tela o seu dobro, triplo e
raiz quadrada.
''')
# Read one number, then print its double, triple and square root.
n = float(input('Digite um número: '))
print('o dobro é: {}'.format(n * 2))
print('o triplo é: {}'.format(n * 3))
# ** 0.5 is the square root; formatted to two decimal places.
print('a raiz é: {:.2f}'.format(n ** 0.5))
|
from os import system, name
system('cls' if name == 'nt' else 'clear')
dsc = ('''DESAFIO 006:
Faça um algoritmo que leia um número e mostre na tela o seu dobro, triplo e
raiz quadrada.
''')
n = float(input('Digite um número: '))
print('o dobro é: {}'.format(n * 2))
print('o triplo é: {}'.format(n * 3))
print('a raiz é: {:.2f}'.format(n ** 0.5))
|
pt
| 0.861652
|
DESAFIO 006: Faça um algoritmo que leia um número e mostre na tela o seu dobro, triplo e raiz quadrada.
| 3.661867
| 4
|
test/IntentEngineTest.py
|
fakegit/adapt
| 796
|
6625908
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from adapt.engine import IntentDeterminationEngine
from adapt.intent import IntentBuilder
__author__ = 'seanfitz'
class IntentEngineTests(unittest.TestCase):
    """Unit tests for adapt's IntentDeterminationEngine.

    Covers parser/entity registration, best-intent selection, removal
    (drop) of parsers, plain entities and regex entities, and result
    ordering by confidence.
    """

    def setUp(self):
        # Fresh engine per test so registrations do not leak across tests.
        self.engine = IntentDeterminationEngine()

    def testRegisterIntentParser(self):
        """Only valid parser objects may be registered; strings raise."""
        assert len(self.engine.intent_parsers) == 0
        try:
            self.engine.register_intent_parser("NOTAPARSER")
            assert "Did not fail to register invalid intent parser" and False
        except ValueError as e:
            pass
        parser = IntentBuilder("Intent").build()
        self.engine.register_intent_parser(parser)
        assert len(self.engine.intent_parsers) == 1

    def testRegisterRegexEntity(self):
        """Registering a regex entity populates both internal registries."""
        assert len(self.engine._regex_strings) == 0
        assert len(self.engine.regular_expressions_entities) == 0
        self.engine.register_regex_entity(".*")
        assert len(self.engine._regex_strings) == 1
        assert len(self.engine.regular_expressions_entities) == 1

    def testSelectBestIntent(self):
        """The parser matching more required entities wins."""
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("tree", "Entity1")
        utterance = "go to the tree house"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        parser2 = IntentBuilder("Parser2").require("Entity1").require("Entity2").build()
        self.engine.register_intent_parser(parser2)
        self.engine.register_entity("house", "Entity2")
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser2'

    def testDropIntent(self):
        """After dropping the best parser, the next best one matches."""
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("tree", "Entity1")
        parser2 = (IntentBuilder("Parser2").require("Entity1")
                   .require("Entity2").build())
        self.engine.register_intent_parser(parser2)
        self.engine.register_entity("house", "Entity2")
        utterance = "go to the tree house"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser2'
        assert self.engine.drop_intent_parser('Parser2') is True
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'

    def testDropEntity(self):
        """drop_entity by type removes every word registered for the type."""
        parser1 = IntentBuilder("Parser1").require("Entity1").build()
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("laboratory", "Entity1")
        self.engine.register_entity("lab", "Entity1")
        utterance = "get out of my lab"
        utterance2 = "get out of my laboratory"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        # Remove Entity and re-register laboratory and make sure only that
        # matches.
        self.engine.drop_entity(entity_type='Entity1')
        self.engine.register_entity("laboratory", "Entity1")
        # Sentence containing lab should not produce any results
        with self.assertRaises(StopIteration):
            intent = next(self.engine.determine_intent(utterance))
        # But sentence with laboratory should
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
        assert intent['intent_type'] == 'Parser1'

    def testCustomDropEntity(self):
        """drop_entity with a match_func removes all matching entity types."""
        parser1 = (IntentBuilder("Parser1").one_of("Entity1", "Entity2")
                   .build())
        self.engine.register_intent_parser(parser1)
        self.engine.register_entity("laboratory", "Entity1")
        self.engine.register_entity("lab", "Entity2")
        utterance = "get out of my lab"
        utterance2 = "get out of my laboratory"
        intent = next(self.engine.determine_intent(utterance))
        assert intent
        assert intent['intent_type'] == 'Parser1'
        intent = next(self.engine.determine_intent(utterance2))
        assert intent
        assert intent['intent_type'] == 'Parser1'

        def matcher(data):
            # data appears to be a (word, entity_type) pair; select every
            # 'Entity*' type — TODO confirm against adapt's drop_entity API.
            return data[1].startswith('Entity')

        self.engine.drop_entity(match_func=matcher)
        self.engine.register_entity("laboratory", "Entity1")
        # Sentence containing lab should not produce any results
        with self.assertRaises(StopIteration):
            intent = next(self.engine.determine_intent(utterance))
        # But sentence with laboratory should
        intent = next(self.engine.determine_intent(utterance2))
        assert intent

    def testDropRegexEntity(self):
        """drop_regex_entity by entity type removes exactly one pattern."""
        self.engine.register_regex_entity(r"the dog (?P<Dog>.*)")
        self.engine.register_regex_entity(r"the cat (?P<Cat>.*)")
        assert len(self.engine._regex_strings) == 2
        assert len(self.engine.regular_expressions_entities) == 2
        self.engine.drop_regex_entity(entity_type='Cat')
        assert len(self.engine._regex_strings) == 1
        assert len(self.engine.regular_expressions_entities) == 1

    def testCustomDropRegexEntity(self):
        """drop_regex_entity with a match_func removes matching patterns."""
        self.engine.register_regex_entity(r"the dog (?P<SkillADog>.*)")
        self.engine.register_regex_entity(r"the cat (?P<SkillACat>.*)")
        self.engine.register_regex_entity(r"the mangy dog (?P<SkillBDog>.*)")
        assert len(self.engine._regex_strings) == 3
        assert len(self.engine.regular_expressions_entities) == 3

        def matcher(regexp):
            """Matcher for all match groups defined for SkillB"""
            match_groups = regexp.groupindex.keys()
            return any([k.startswith('SkillB') for k in match_groups])

        self.engine.drop_regex_entity(match_func=matcher)
        assert len(self.engine._regex_strings) == 2
        assert len(self.engine.regular_expressions_entities) == 2

    def testAddingOfRemovedRegexp(self):
        """A dropped regex entity can be registered again afterwards."""
        self.engine.register_regex_entity(r"the cool (?P<thing>.*)")

        def matcher(regexp):
            """Matcher for all match groups defined for SkillB"""
            match_groups = regexp.groupindex.keys()
            return any([k.startswith('thing') for k in match_groups])

        self.engine.drop_regex_entity(match_func=matcher)
        assert len(self.engine.regular_expressions_entities) == 0
        self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
        assert len(self.engine.regular_expressions_entities) == 1

    def testUsingOfRemovedRegexp(self):
        """A dropped regex entity no longer contributes to matching."""
        self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
        parser = IntentBuilder("Intent").require("thing").build()
        self.engine.register_intent_parser(parser)

        def matcher(regexp):
            """Matcher for all match groups defined for SkillB"""
            match_groups = regexp.groupindex.keys()
            return any([k.startswith('thing') for k in match_groups])

        self.engine.drop_regex_entity(match_func=matcher)
        assert len(self.engine.regular_expressions_entities) == 0
        utterance = "the cool cat"
        intents = [match for match in self.engine.determine_intent(utterance)]
        assert len(intents) == 0

    def testEmptyTags(self):
        """An utterance with no tagged entities yields no intents."""
        # Validates https://github.com/MycroftAI/adapt/issues/114
        engine = IntentDeterminationEngine()
        engine.register_entity("Kevin",
                               "who")  # same problem if several entities
        builder = IntentBuilder("Buddies")
        builder.optionally("who")  # same problem if several entity types
        engine.register_intent_parser(builder.build())
        intents = [i for i in engine.determine_intent("Julien is a friend")]
        assert len(intents) == 0

    def testResultsAreSortedByConfidence(self):
        """determine_intent yields results in non-increasing confidence."""
        self.engine.register_entity('what is', 'Query', None)
        self.engine.register_entity('weather', 'Weather', None)
        self.engine.register_regex_entity('(at|in) (?P<Location>.+)')
        self.engine.register_regex_entity('(?P<Entity>.*)')
        i = IntentBuilder("CurrentWeatherIntent").require(
            "Weather").optionally("Location").build()
        self.engine.register_intent_parser(i)
        utterance = "what is the weather like in stockholm"
        intents = [
            i for i in self.engine.determine_intent(utterance, num_results=100)
        ]
        # Pairwise check that confidences never increase down the list.
        confidences = [intent.get('confidence', 0.0) for intent in intents]
        assert len(confidences) > 1
        assert all(confidences[i] >= confidences[i+1] for i in range(len(confidences)-1))
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from adapt.engine import IntentDeterminationEngine
from adapt.intent import IntentBuilder
__author__ = 'seanfitz'
class IntentEngineTests(unittest.TestCase):
def setUp(self):
self.engine = IntentDeterminationEngine()
def testRegisterIntentParser(self):
assert len(self.engine.intent_parsers) == 0
try:
self.engine.register_intent_parser("NOTAPARSER")
assert "Did not fail to register invalid intent parser" and False
except ValueError as e:
pass
parser = IntentBuilder("Intent").build()
self.engine.register_intent_parser(parser)
assert len(self.engine.intent_parsers) == 1
def testRegisterRegexEntity(self):
assert len(self.engine._regex_strings) == 0
assert len(self.engine.regular_expressions_entities) == 0
self.engine.register_regex_entity(".*")
assert len(self.engine._regex_strings) == 1
assert len(self.engine.regular_expressions_entities) == 1
def testSelectBestIntent(self):
parser1 = IntentBuilder("Parser1").require("Entity1").build()
self.engine.register_intent_parser(parser1)
self.engine.register_entity("tree", "Entity1")
utterance = "go to the tree house"
intent = next(self.engine.determine_intent(utterance))
assert intent
assert intent['intent_type'] == 'Parser1'
parser2 = IntentBuilder("Parser2").require("Entity1").require("Entity2").build()
self.engine.register_intent_parser(parser2)
self.engine.register_entity("house", "Entity2")
intent = next(self.engine.determine_intent(utterance))
assert intent
assert intent['intent_type'] == 'Parser2'
def testDropIntent(self):
parser1 = IntentBuilder("Parser1").require("Entity1").build()
self.engine.register_intent_parser(parser1)
self.engine.register_entity("tree", "Entity1")
parser2 = (IntentBuilder("Parser2").require("Entity1")
.require("Entity2").build())
self.engine.register_intent_parser(parser2)
self.engine.register_entity("house", "Entity2")
utterance = "go to the tree house"
intent = next(self.engine.determine_intent(utterance))
assert intent
assert intent['intent_type'] == 'Parser2'
assert self.engine.drop_intent_parser('Parser2') is True
intent = next(self.engine.determine_intent(utterance))
assert intent
assert intent['intent_type'] == 'Parser1'
def testDropEntity(self):
parser1 = IntentBuilder("Parser1").require("Entity1").build()
self.engine.register_intent_parser(parser1)
self.engine.register_entity("laboratory", "Entity1")
self.engine.register_entity("lab", "Entity1")
utterance = "get out of my lab"
utterance2 = "get out of my laboratory"
intent = next(self.engine.determine_intent(utterance))
assert intent
assert intent['intent_type'] == 'Parser1'
intent = next(self.engine.determine_intent(utterance2))
assert intent
assert intent['intent_type'] == 'Parser1'
# Remove Entity and re-register laboratory and make sure only that
# matches.
self.engine.drop_entity(entity_type='Entity1')
self.engine.register_entity("laboratory", "Entity1")
# Sentence containing lab should not produce any results
with self.assertRaises(StopIteration):
intent = next(self.engine.determine_intent(utterance))
# But sentence with laboratory should
intent = next(self.engine.determine_intent(utterance2))
assert intent
assert intent['intent_type'] == 'Parser1'
def testCustomDropEntity(self):
parser1 = (IntentBuilder("Parser1").one_of("Entity1", "Entity2")
.build())
self.engine.register_intent_parser(parser1)
self.engine.register_entity("laboratory", "Entity1")
self.engine.register_entity("lab", "Entity2")
utterance = "get out of my lab"
utterance2 = "get out of my laboratory"
intent = next(self.engine.determine_intent(utterance))
assert intent
assert intent['intent_type'] == 'Parser1'
intent = next(self.engine.determine_intent(utterance2))
assert intent
assert intent['intent_type'] == 'Parser1'
def matcher(data):
return data[1].startswith('Entity')
self.engine.drop_entity(match_func=matcher)
self.engine.register_entity("laboratory", "Entity1")
# Sentence containing lab should not produce any results
with self.assertRaises(StopIteration):
intent = next(self.engine.determine_intent(utterance))
# But sentence with laboratory should
intent = next(self.engine.determine_intent(utterance2))
assert intent
def testDropRegexEntity(self):
self.engine.register_regex_entity(r"the dog (?P<Dog>.*)")
self.engine.register_regex_entity(r"the cat (?P<Cat>.*)")
assert len(self.engine._regex_strings) == 2
assert len(self.engine.regular_expressions_entities) == 2
self.engine.drop_regex_entity(entity_type='Cat')
assert len(self.engine._regex_strings) == 1
assert len(self.engine.regular_expressions_entities) == 1
def testCustomDropRegexEntity(self):
self.engine.register_regex_entity(r"the dog (?P<SkillADog>.*)")
self.engine.register_regex_entity(r"the cat (?P<SkillACat>.*)")
self.engine.register_regex_entity(r"the mangy dog (?P<SkillBDog>.*)")
assert len(self.engine._regex_strings) == 3
assert len(self.engine.regular_expressions_entities) == 3
def matcher(regexp):
"""Matcher for all match groups defined for SkillB"""
match_groups = regexp.groupindex.keys()
return any([k.startswith('SkillB') for k in match_groups])
self.engine.drop_regex_entity(match_func=matcher)
assert len(self.engine._regex_strings) == 2
assert len(self.engine.regular_expressions_entities) == 2
def testAddingOfRemovedRegexp(self):
self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
def matcher(regexp):
"""Matcher for all match groups defined for SkillB"""
match_groups = regexp.groupindex.keys()
return any([k.startswith('thing') for k in match_groups])
self.engine.drop_regex_entity(match_func=matcher)
assert len(self.engine.regular_expressions_entities) == 0
self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
assert len(self.engine.regular_expressions_entities) == 1
def testUsingOfRemovedRegexp(self):
self.engine.register_regex_entity(r"the cool (?P<thing>.*)")
parser = IntentBuilder("Intent").require("thing").build()
self.engine.register_intent_parser(parser)
def matcher(regexp):
"""Matcher for all match groups defined for SkillB"""
match_groups = regexp.groupindex.keys()
return any([k.startswith('thing') for k in match_groups])
self.engine.drop_regex_entity(match_func=matcher)
assert len(self.engine.regular_expressions_entities) == 0
utterance = "the cool cat"
intents = [match for match in self.engine.determine_intent(utterance)]
assert len(intents) == 0
def testEmptyTags(self):
# Validates https://github.com/MycroftAI/adapt/issues/114
engine = IntentDeterminationEngine()
engine.register_entity("Kevin",
"who") # same problem if several entities
builder = IntentBuilder("Buddies")
builder.optionally("who") # same problem if several entity types
engine.register_intent_parser(builder.build())
intents = [i for i in engine.determine_intent("Julien is a friend")]
assert len(intents) == 0
def testResultsAreSortedByConfidence(self):
self.engine.register_entity('what is', 'Query', None)
self.engine.register_entity('weather', 'Weather', None)
self.engine.register_regex_entity('(at|in) (?P<Location>.+)')
self.engine.register_regex_entity('(?P<Entity>.*)')
i = IntentBuilder("CurrentWeatherIntent").require(
"Weather").optionally("Location").build()
self.engine.register_intent_parser(i)
utterance = "what is the weather like in stockholm"
intents = [
i for i in self.engine.determine_intent(utterance, num_results=100)
]
confidences = [intent.get('confidence', 0.0) for intent in intents]
assert len(confidences) > 1
assert all(confidences[i] >= confidences[i+1] for i in range(len(confidences)-1))
|
en
| 0.872663
|
# Copyright 2018 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Remove Entity and re-register laboratory and make sure only that # matches. # Sentence containing lab should not produce any results # But sentence with laboratory should # Sentence containing lab should not produce any results # But sentence with laboratory should Matcher for all match groups defined for SkillB Matcher for all match groups defined for SkillB Matcher for all match groups defined for SkillB # Validates https://github.com/MycroftAI/adapt/issues/114 # same problem if several entities # same problem if several entity types
| 2.321787
| 2
|
libtbx/sphinx/pubmed.py
|
ErwinP/cctbx_project
| 0
|
6625909
|
from __future__ import absolute_import, division, print_function
import docutils.parsers.rst
import multiprocessing
from Bio import Entrez
_biolock = multiprocessing.Lock()
def setup(app):
    """Sphinx extension entry point: register the ``pubmed`` directive.

    Declares the extension safe for Sphinx's parallel source reading.
    """
    directive_name = 'pubmed'
    app.add_directive(directive_name, PubMedDirective)
    metadata = {"parallel_read_safe": True}
    return metadata
class PubMedDirective(docutils.parsers.rst.Directive):
    """``.. pubmed:: <PMID>`` directive.

    Fetches the article record for *PMID* from NCBI Entrez (PubMed) and
    inserts a formatted literature reference — DOI-linked title, author
    list, journal reference, PMID, plus optional PMC and reprint links —
    as generated reStructuredText.
    """

    # this disables content in the directive
    has_content = False
    required_arguments = 1        # the PMID
    optional_arguments = 1
    final_argument_whitespace = True
    # :reprint-url: adds an extra "(Reprint)" link to the citation.
    option_spec = {'reprint-url': docutils.parsers.rst.directives.unicode_code}

    def run(self):
        """Fetch the PubMed record and splice the citation into the doc."""
        PMID = self.arguments[0]
        reprint_url = self.options.get('reprint-url', None)
        Entrez.email = '<EMAIL>'
        # Serialize Entrez access across Sphinx's parallel readers with a
        # process-wide lock (extension is declared parallel_read_safe).
        with _biolock:
            handle = Entrez.efetch(db="pubmed", id=PMID, retmode="xml")
            XML = Entrez.read(handle)['PubmedArticle']

        def raw_html_link_new_tab(identifier, link_text, link):
            # Build a reST substitution definition |identifier| rendering as
            # an external link that opens in a new browser tab.
            return '.. |%s| raw:: html\n\n' %identifier + \
                ' <a class="reference external" href="%s" target="_blank">%s</a>' %(
                link, link_text)

        raw_directives = []
        text = []
        for tag in XML:
            # Title/doi link:
            possible_doi = [ idx for idx in tag["PubmedData"]["ArticleIdList"]
                             if idx.attributes["IdType"]=="doi" ]
            article = tag["MedlineCitation"]["Article"]
            # Remove trailing dot and all control characters, including newline chars, from title.
            get_title = ''.join(c for c in article['ArticleTitle'].rstrip('.') if ord(c) >= 32)
            doi_link_text = None
            if len(possible_doi) > 0:
                text.append('| |%s|' %possible_doi[0])
                raw_directives.append(raw_html_link_new_tab(
                    possible_doi[0], get_title, "https://doi.org/%s" %possible_doi[0]))
            # Author list
            authors = [ " ".join([elem["LastName"],elem["Initials"]])
                        for elem in article["AuthorList"] ]
            text.append("| %s." %(", ".join(authors)))
            # Journal reference
            journal = article["Journal"]
            journal_text = "| *%s*" %(journal["ISOAbbreviation"])
            issue = journal["JournalIssue"]
            if 'Volume' in issue:
                journal_text += " **%s**" % issue['Volume']
            if 'Pagination' in article:
                journal_text += ", %s" % article["Pagination"]["MedlinePgn"]
            date = issue["PubDate"]
            if 'Month' in date:
                # Day defaults to 1 when the record omits it.
                journal_text += " (%s %s %s)."%(date.get("Day",1), date["Month"], date["Year"])
            else:
                journal_text += " (%s)"%(date["Year"])
            journal_text += " [PMID:%s]"%PMID
            # Optional PMC open-access reprint link.
            possible_pmc = [ idx for idx in tag["PubmedData"]["ArticleIdList"]
                             if idx.attributes["IdType"]=="pmc" ]
            if len(possible_pmc) > 0:
                journal_text += " [PMC reprint: |%s|]" %possible_pmc[0]
                raw_directives.append(raw_html_link_new_tab(
                    possible_pmc[0], "%s" %possible_pmc[0],
                    "http://ncbi.nlm.nih.gov/pmc/articles/%s/" %possible_pmc[0]))
            if reprint_url is not None:
                journal_text += " |%s_reprint|" %PMID
                raw_directives.append(raw_html_link_new_tab(
                    "%s_reprint" %PMID, "(Reprint)", reprint_url))
            text.append(journal_text)
        # Substitution definitions go after the visible citation lines.
        for directive in raw_directives:
            text.append("\n%s\n" %directive)
        # try:
        #   print("vvv")
        #   print("\n".join(text))
        #   print("^^^")
        # except Exception:
        #   pass
        # insert rst
        # NOTE(review): uses docutils.statemachine although only
        # docutils.parsers.rst is imported at module top; works because the
        # rst parser imports it as a docutils attribute — confirm.
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        lines = docutils.statemachine.string2lines(
            "\n".join(text), self.state.document.settings.tab_width, convert_whitespace=True)
        # Feed the generated reST back into the parser at this location.
        self.state_machine.insert_input(lines, source)
        return []
|
from __future__ import absolute_import, division, print_function
import docutils.parsers.rst
import multiprocessing
from Bio import Entrez
_biolock = multiprocessing.Lock()
def setup(app):
app.add_directive('pubmed', PubMedDirective)
return {"parallel_read_safe": True}
class PubMedDirective(docutils.parsers.rst.Directive):
# this disables content in the directive
has_content = False
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'reprint-url': docutils.parsers.rst.directives.unicode_code}
def run(self):
PMID = self.arguments[0]
reprint_url = self.options.get('reprint-url', None)
Entrez.email = '<EMAIL>'
with _biolock:
handle = Entrez.efetch(db="pubmed", id=PMID, retmode="xml")
XML = Entrez.read(handle)['PubmedArticle']
def raw_html_link_new_tab(identifier, link_text, link):
return '.. |%s| raw:: html\n\n' %identifier + \
' <a class="reference external" href="%s" target="_blank">%s</a>' %(
link, link_text)
raw_directives = []
text = []
for tag in XML:
# Title/doi link:
possible_doi = [ idx for idx in tag["PubmedData"]["ArticleIdList"]
if idx.attributes["IdType"]=="doi" ]
article = tag["MedlineCitation"]["Article"]
# Remove trailing dot and all control characters, including newline chars, from title.
get_title = ''.join(c for c in article['ArticleTitle'].rstrip('.') if ord(c) >= 32)
doi_link_text = None
if len(possible_doi) > 0:
text.append('| |%s|' %possible_doi[0])
raw_directives.append(raw_html_link_new_tab(
possible_doi[0], get_title, "https://doi.org/%s" %possible_doi[0]))
# Author list
authors = [ " ".join([elem["LastName"],elem["Initials"]])
for elem in article["AuthorList"] ]
text.append("| %s." %(", ".join(authors)))
# Journal reference
journal = article["Journal"]
journal_text = "| *%s*" %(journal["ISOAbbreviation"])
issue = journal["JournalIssue"]
if 'Volume' in issue:
journal_text += " **%s**" % issue['Volume']
if 'Pagination' in article:
journal_text += ", %s" % article["Pagination"]["MedlinePgn"]
date = issue["PubDate"]
if 'Month' in date:
journal_text += " (%s %s %s)."%(date.get("Day",1), date["Month"], date["Year"])
else:
journal_text += " (%s)"%(date["Year"])
journal_text += " [PMID:%s]"%PMID
possible_pmc = [ idx for idx in tag["PubmedData"]["ArticleIdList"]
if idx.attributes["IdType"]=="pmc" ]
if len(possible_pmc) > 0:
journal_text += " [PMC reprint: |%s|]" %possible_pmc[0]
raw_directives.append(raw_html_link_new_tab(
possible_pmc[0], "%s" %possible_pmc[0],
"http://ncbi.nlm.nih.gov/pmc/articles/%s/" %possible_pmc[0]))
if reprint_url is not None:
journal_text += " |%s_reprint|" %PMID
raw_directives.append(raw_html_link_new_tab(
"%s_reprint" %PMID, "(Reprint)", reprint_url))
text.append(journal_text)
for directive in raw_directives:
text.append("\n%s\n" %directive)
# try:
# print("vvv")
# print("\n".join(text))
# print("^^^")
# except Exception:
# pass
# insert rst
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
lines = docutils.statemachine.string2lines(
"\n".join(text), self.state.document.settings.tab_width, convert_whitespace=True)
self.state_machine.insert_input(lines, source)
return []
|
en
| 0.739174
|
# this disables content in the directive # Title/doi link: # Remove trailing dot and all control characters, including newline chars, from title. # Author list # Journal reference # try: # print("vvv") # print("\n".join(text)) # print("^^^") # except Exception: # pass # insert rst
| 2.248182
| 2
|
cscs-checks/prgenv/cuda/cuda_aware_mpi.py
|
jacwah/reframe
| 0
|
6625910
|
<reponame>jacwah/reframe<filename>cscs-checks/prgenv/cuda/cuda_aware_mpi.py
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import reframe as rfm
import reframe.utility.sanity as sn
import cscstests.microbenchmarks.gpu.hooks as hooks
@rfm.simple_test
class cuda_aware_mpi_check(rfm.CompileOnlyRegressionTest):
    """Build-only check of NVIDIA's CUDA-aware MPI Jacobi example."""

    descr = 'Cuda-aware MPI test from the NVIDIA repo.'
    # Sources are cloned straight from the NVIDIA developer-blog samples.
    sourcesdir = ('https://github.com/NVIDIA-developer-blog/'
                  'code-samples.git')
    valid_systems = [
        'daint:gpu', 'dom:gpu', 'arolla:cn', 'tsa:cn',
        'ault:amdv100', 'ault:intelv100'
    ]
    prebuild_cmds = ['cd posts/cuda-aware-mpi-example/src']
    build_system = 'Make'
    # List the produced binaries so the sanity check can grep for them.
    postbuild_cmds = ['ls ../bin']
    maintainers = ['JO']
    tags = {'production', 'scs'}
    # Filled in by hooks.set_gpu_arch after setup (compute capability string).
    gpu_arch = variable(str, type(None))

    @run_after('init')
    def set_valid_prog_environs(self):
        """Select programming environments per system.

        Arolla/Tsa/Ault only provide PrgEnv-gnu and additionally require
        exclusive node access.
        """
        if self.current_system.name in ['arolla', 'tsa']:
            self.valid_prog_environs = ['PrgEnv-gnu']
        elif self.current_system.name in ['ault']:
            self.valid_prog_environs = ['PrgEnv-gnu']
        else:
            self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu',
                                        'PrgEnv-pgi', 'PrgEnv-nvidia']
        if self.current_system.name in ['arolla', 'tsa', 'ault']:
            self.exclusive_access = True

    # Attach the shared GPU hooks as post-setup pipeline hooks
    # (bind is presumably the reframe builtin that rebinds an external
    # function as a method of this test — TODO confirm).
    run_after('setup')(bind(hooks.set_gpu_arch))
    run_after('setup')(bind(hooks.set_num_gpus_per_node))

    @run_before('compile')
    def set_compilers(self):
        """Configure per-environment flags and the Make variables."""
        if self.current_environ.name == 'PrgEnv-pgi':
            self.build_system.cflags = ['-std=c99', ' -O3']
        elif self.current_environ.name == 'PrgEnv-nvidia':
            self.variables = {
                'CUDA_HOME': '$CRAY_NVIDIA_PREFIX/cuda'
            }
        # Target exactly the GPU architecture detected for this partition.
        gcd_flgs = (
            '-gencode arch=compute_{0},code=sm_{0}'.format(self.gpu_arch)
        )
        self.build_system.options = [
            'CUDA_INSTALL_PATH=$CUDA_HOME',
            'MPI_HOME=$CRAY_MPICH_PREFIX',
            'GENCODE_FLAGS="%s"' % (gcd_flgs),
            'MPICC="%s"' % self.current_environ.cc,
            'MPILD="%s"' % self.current_environ.cxx
        ]

    @run_before('sanity')
    def set_sanity_patterns(self):
        """Build succeeded if the Jacobi binary appears in the bin listing."""
        self.sanity_patterns = sn.assert_found(r'jacobi_cuda_aware_mpi',
                                               self.stdout)
class CudaAwareMpiRuns(rfm.RunOnlyRegressionTest):
    """Base class for running the Jacobi binary built by cuda_aware_mpi_check.

    Run-only: depends on the compile test and executes its staged binary.
    """

    # Required so cray-mpich passes GPU device pointers through MPI.
    prerun_cmds = ['export MPICH_RDMA_ENABLED_CUDA=1']
    valid_systems = [
        'daint:gpu', 'dom:gpu', 'arolla:cn', 'tsa:cn',
        'ault:amdv100', 'ault:intelv100'
    ]

    @run_after('init')
    def set_valid_prog_environs(self):
        """Same environment selection as the build test (keep in sync)."""
        if self.current_system.name in ['arolla', 'tsa']:
            self.valid_prog_environs = ['PrgEnv-gnu']
        elif self.current_system.name in ['ault']:
            self.valid_prog_environs = ['PrgEnv-gnu']
        else:
            self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu',
                                        'PrgEnv-pgi', 'PrgEnv-nvidia']
        if self.current_system.name in ['arolla', 'tsa', 'ault']:
            self.exclusive_access = True

    @run_after('init')
    def add_deps(self):
        # Run tests reuse the binary staged by the compile-only test.
        self.depends_on('cuda_aware_mpi_check')

    # Attach the shared GPU hooks as post-setup pipeline hooks.
    run_after('setup')(bind(hooks.set_gpu_arch))
    run_after('setup')(bind(hooks.set_num_gpus_per_node))

    @require_deps
    def set_executable(self, cuda_aware_mpi_check):
        """Point the executable at the binary in the build test's stage dir."""
        self.executable = os.path.join(
            cuda_aware_mpi_check().stagedir,
            'posts', 'cuda-aware-mpi-example',
            'bin', 'jacobi_cuda_aware_mpi'
        )

    @run_before('sanity')
    def set_sanity_patterns(self):
        """The solver must converge to the expected reference residue."""
        self.sanity_patterns = sn.assert_found(r'Stopped after 1000 iterations'
                                               r' with residue 0.00024',
                                               self.stdout)
@rfm.simple_test
class cuda_aware_mpi_one_node_check(CudaAwareMpiRuns):
    '''Run the case in one node.'''

    # Multiple ranks share each GPU, so enable the CUDA MPS daemon.
    prerun_cmds += ['export CRAY_CUDA_MPS=1']

    @run_before('run')
    def set_num_tasks(self):
        """Launch two ranks per GPU on a single node.

        Bug fix: the topology option used ``self.num_tasks / 2``, which is
        true division in Python 3 and rendered a float in the command line
        (e.g. ``-t 2.0 2``); integer division produces the intended
        ``-t 2 2`` while computing the same value.
        """
        self.num_tasks = 2 * self.num_gpus_per_node
        self.num_tasks_per_node = self.num_tasks
        self.executable_opts = [f'-t {self.num_tasks // 2} 2']
@rfm.simple_test
class cuda_aware_mpi_two_nodes_check(CudaAwareMpiRuns):
    '''Run the case across two nodes.'''

    @run_before('run')
    def set_num_tasks(self):
        # One rank and one GPU per node, two nodes total; the topology
        # option requests a 2x1 rank grid.
        self.num_tasks = 2
        self.num_tasks_per_node = 1
        self.num_gpus_per_node = 1
        self.executable_opts = ['-t %d 1' % self.num_tasks]
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import reframe as rfm
import reframe.utility.sanity as sn
import cscstests.microbenchmarks.gpu.hooks as hooks
@rfm.simple_test
class cuda_aware_mpi_check(rfm.CompileOnlyRegressionTest):
descr = 'Cuda-aware MPI test from the NVIDIA repo.'
sourcesdir = ('https://github.com/NVIDIA-developer-blog/'
'code-samples.git')
valid_systems = [
'daint:gpu', 'dom:gpu', 'arolla:cn', 'tsa:cn',
'ault:amdv100', 'ault:intelv100'
]
prebuild_cmds = ['cd posts/cuda-aware-mpi-example/src']
build_system = 'Make'
postbuild_cmds = ['ls ../bin']
maintainers = ['JO']
tags = {'production', 'scs'}
gpu_arch = variable(str, type(None))
@run_after('init')
def set_valid_prog_environs(self):
if self.current_system.name in ['arolla', 'tsa']:
self.valid_prog_environs = ['PrgEnv-gnu']
elif self.current_system.name in ['ault']:
self.valid_prog_environs = ['PrgEnv-gnu']
else:
self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu',
'PrgEnv-pgi', 'PrgEnv-nvidia']
if self.current_system.name in ['arolla', 'tsa', 'ault']:
self.exclusive_access = True
run_after('setup')(bind(hooks.set_gpu_arch))
run_after('setup')(bind(hooks.set_num_gpus_per_node))
@run_before('compile')
def set_compilers(self):
if self.current_environ.name == 'PrgEnv-pgi':
self.build_system.cflags = ['-std=c99', ' -O3']
elif self.current_environ.name == 'PrgEnv-nvidia':
self.variables = {
'CUDA_HOME': '$CRAY_NVIDIA_PREFIX/cuda'
}
gcd_flgs = (
'-gencode arch=compute_{0},code=sm_{0}'.format(self.gpu_arch)
)
self.build_system.options = [
'CUDA_INSTALL_PATH=$CUDA_HOME',
'MPI_HOME=$CRAY_MPICH_PREFIX',
'GENCODE_FLAGS="%s"' % (gcd_flgs),
'MPICC="%s"' % self.current_environ.cc,
'MPILD="%s"' % self.current_environ.cxx
]
@run_before('sanity')
def set_sanity_patterns(self):
self.sanity_patterns = sn.assert_found(r'jacobi_cuda_aware_mpi',
self.stdout)
class CudaAwareMpiRuns(rfm.RunOnlyRegressionTest):
prerun_cmds = ['export MPICH_RDMA_ENABLED_CUDA=1']
valid_systems = [
'daint:gpu', 'dom:gpu', 'arolla:cn', 'tsa:cn',
'ault:amdv100', 'ault:intelv100'
]
@run_after('init')
def set_valid_prog_environs(self):
if self.current_system.name in ['arolla', 'tsa']:
self.valid_prog_environs = ['PrgEnv-gnu']
elif self.current_system.name in ['ault']:
self.valid_prog_environs = ['PrgEnv-gnu']
else:
self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu',
'PrgEnv-pgi', 'PrgEnv-nvidia']
if self.current_system.name in ['arolla', 'tsa', 'ault']:
self.exclusive_access = True
@run_after('init')
def add_deps(self):
self.depends_on('cuda_aware_mpi_check')
run_after('setup')(bind(hooks.set_gpu_arch))
run_after('setup')(bind(hooks.set_num_gpus_per_node))
@require_deps
def set_executable(self, cuda_aware_mpi_check):
self.executable = os.path.join(
cuda_aware_mpi_check().stagedir,
'posts', 'cuda-aware-mpi-example',
'bin', 'jacobi_cuda_aware_mpi'
)
@run_before('sanity')
def set_sanity_patterns(self):
self.sanity_patterns = sn.assert_found(r'Stopped after 1000 iterations'
r' with residue 0.00024',
self.stdout)
@rfm.simple_test
class cuda_aware_mpi_one_node_check(CudaAwareMpiRuns):
'''Run the case in one node.'''
prerun_cmds += ['export CRAY_CUDA_MPS=1']
@run_before('run')
def set_num_tasks(self):
self.num_tasks = 2 * self.num_gpus_per_node
self.num_tasks_per_node = self.num_tasks
self.executable_opts = [f'-t {self.num_tasks/2} 2']
@rfm.simple_test
class cuda_aware_mpi_two_nodes_check(CudaAwareMpiRuns):
'''Run the case across two nodes.'''
@run_before('run')
def set_num_tasks(self):
self.num_tasks = 2
self.num_tasks_per_node = 1
self.num_gpus_per_node = 1
self.executable_opts = ['-t %d 1' % self.num_tasks]
|
en
| 0.661538
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause Run the case in one node. Run the case across two nodes.
| 1.85104
| 2
|
hack/benchmark/run-consensus.py
|
xvzf/htw-saar-vaa
| 0
|
6625911
|
import os
import time
# Deploy environmet
for i in range(10):
print(f"[+] Deploy test{i}")
os.system(f"tk apply environments/consensus --dangerous-auto-approve --name=test{i}")
print("[+] Wait for pods to be up")
os.system("kubectl wait -n default --for=condition=Ready pod --all")
print("[+] Wait 30s")
time.sleep(30)
os.system("sh start-consensus-parallel.sh")
# print("[+] Wait 30s")
# time.sleep(30)
# print("[+] Teardown")
# os.system("kubectl delete -n default pod --all --grace-period=0")
|
import os
import time
# Deploy environmet
for i in range(10):
print(f"[+] Deploy test{i}")
os.system(f"tk apply environments/consensus --dangerous-auto-approve --name=test{i}")
print("[+] Wait for pods to be up")
os.system("kubectl wait -n default --for=condition=Ready pod --all")
print("[+] Wait 30s")
time.sleep(30)
os.system("sh start-consensus-parallel.sh")
# print("[+] Wait 30s")
# time.sleep(30)
# print("[+] Teardown")
# os.system("kubectl delete -n default pod --all --grace-period=0")
|
en
| 0.241929
|
# Deploy environmet # print("[+] Wait 30s") # time.sleep(30) # print("[+] Teardown") # os.system("kubectl delete -n default pod --all --grace-period=0")
| 2.139918
| 2
|
pybeehive/asyn/__init__.py
|
sentrip/pybeehive
| 2
|
6625912
|
<filename>pybeehive/asyn/__init__.py
from ..core import Event
from .core import Listener, Streamer
from .hive import Hive
from .utils import async_generator
__all__ = [
'Event', 'Listener', 'Streamer',
'Hive', 'async_generator'
]
|
<filename>pybeehive/asyn/__init__.py
from ..core import Event
from .core import Listener, Streamer
from .hive import Hive
from .utils import async_generator
__all__ = [
'Event', 'Listener', 'Streamer',
'Hive', 'async_generator'
]
|
none
| 1
| 1.271277
| 1
|
|
birdfsd_yolov5/label_studio_helpers/watchdog.py
|
bird-feeder/BirdFSD-YOLOv5
| 0
|
6625913
|
<filename>birdfsd_yolov5/label_studio_helpers/watchdog.py
#!/usr/bin/env python
# coding: utf-8
import argparse
import copy
import imghdr
import shutil
import time
import uuid
from glob import glob
from pathlib import Path
from PIL import Image, UnidentifiedImageError
from dotenv import load_dotenv
from loguru import logger
from add_and_sync_new_project import add_new_project, \
add_and_sync_data_storage
from utils import catch_keyboard_interrupt
class MissingArgument(Exception):
pass
class WatchDog:
def __init__(self, root_data_folder, images_per_folder=1000, debug=False):
self.root_data_folder = root_data_folder
self.images_per_folder = images_per_folder
self.debug = debug
@staticmethod
def create_dummy_data():
dummy_projects = ['project-1000', 'project-1001', 'MOVE_ME']
for project in dummy_projects:
num_dummies = 1000
proj_folder = Path(f'dummy/{project}')
if project == dummy_projects[-2]:
num_dummies = 200
elif project == 'MOVE_ME':
num_dummies = 3600
proj_folder = Path(project)
proj_folder.mkdir(exist_ok=True, parents=True)
for _ in range(num_dummies):
fname = str(uuid.uuid4())
Path(proj_folder /
Path(f'fake_{str(fname).zfill(4)}.jpg')).touch()
logger.debug('Created dummy data')
def validate_image_file(self, file: str) -> str:
if self.debug:
return file
try:
if imghdr.what(file) and Image.open(file):
return file
except UnidentifiedImageError:
time.sleep(1)
if imghdr.what(file) and Image.open(file):
return file
else:
logger.warning(f'`{file}` is corrupted!')
shutil.move(
file,
f'{Path(self.root_data_folder).parent}/data_corrupted')
def generate_next_folder_name(self) -> str:
project_folders = sorted(glob(f'{self.root_data_folder}/project-*'))
num = str(int(project_folders[-1].split('project-')[-1]) + 1).zfill(4)
Path(f'{self.root_data_folder}/project-{num}').mkdir()
return f'{self.root_data_folder}/project-{num}'
def refresh_source(self) -> tuple:
folders = glob(f'{self.root_data_folder}/*')
project_folders = glob(f'{self.root_data_folder}/project-*')
new_folders = list(set(folders).difference(project_folders))
if new_folders:
logger.debug(f'New folder(s) detected: {new_folders}')
new_files = []
for new_folder in new_folders:
cur_files = [
x for x in glob(f'{new_folder}/**/*', recursive=True)
if not Path(x).is_dir()
]
if cur_files:
new_files.append(cur_files)
new_files = sum(new_files, [])
return project_folders, new_folders, new_files
def arrange_new_data_files(self) -> None:
Path(f'{Path(self.root_data_folder).parent}/data_corrupted').mkdir(
exist_ok=True)
project_folders, new_folders, new_files = self.refresh_source()
not_filled_folders = []
project_folders = sorted(glob(f'{self.root_data_folder}/project-*'))
for folder in project_folders:
folder_size = len(glob(f'{folder}/*'))
if self.images_per_folder > folder_size:
not_filled_folders.append((folder, folder_size))
logger.debug(f'Not filled: {folder}, size: {folder_size}')
if not_filled_folders:
for folder, folder_size in not_filled_folders:
for file in new_files:
if self.validate_image_file(file):
try:
shutil.move(file, folder)
except shutil.Error:
fp = Path(file)
rand_str = str(uuid.uuid4()).split('-')[-1]
dest = f'{folder}/{fp.stem}_{rand_str}{fp.suffix}'
shutil.move(file, dest)
if len(glob(f'{folder}/*')) == 1000:
break
project_folders, new_folders, new_files = self.refresh_source()
chunks = [
new_files[i:i + self.images_per_folder]
for i in range(0, len(new_files), self.images_per_folder)
]
for chunk in chunks:
dst = self.generate_next_folder_name()
folder_name = Path(dst).name
for file in chunk:
if self.validate_image_file(file):
shutil.move(file, dst)
if not self.debug:
new_project = add_new_project(folder_name)
_ = add_and_sync_data_storage(new_project['id'],
new_project['title'])
for empty_folder in new_folders:
contains_any_file = [
x for x in glob(f'{empty_folder}/**/*', recursive=True)
if not Path(x).is_dir()
]
if not contains_any_file:
shutil.rmtree(empty_folder)
def watch(self):
catch_keyboard_interrupt()
if self.debug:
self.root_data_folder = 'dummy'
self.create_dummy_data()
else:
Path(f'{self.root_data_folder}/project-0001').mkdir(exist_ok=True)
logger.debug('Started watchdog...')
global_state = glob(f'{self.root_data_folder}/**/*', recursive=True)
while True:
local_state = glob(f'{self.root_data_folder}/**/*', recursive=True)
if global_state != local_state:
logger.debug('Detected change!')
global_state = copy.deepcopy(local_state)
trigger_file = f'{self.root_data_folder}/init'
if Path(trigger_file).exists():
logger.debug('Triggered manually...')
Path(trigger_file).unlink()
self.arrange_new_data_files()
time.sleep(60)
if __name__ == '__main__':
load_dotenv()
parser = argparse.ArgumentParser()
parser.add_argument('--root-data-folder',
help='Path to the folder where all the data is kept',
type=str)
parser.add_argument('--debug', help='Debug mode', action='store_true')
parser.add_argument('--images-per-folder',
help='Number of images per folder',
type=int,
default=1000)
args = parser.parse_args()
if not args.root_data_folder and not args.debug:
raise MissingArgument(
'`--root_data_folder` is required when not in debug mode!')
watch_dog = WatchDog(root_data_folder=args.root_data_folder,
images_per_folder=args.images_per_folder,
debug=args.debug)
watch_dog.watch()
|
<filename>birdfsd_yolov5/label_studio_helpers/watchdog.py
#!/usr/bin/env python
# coding: utf-8
import argparse
import copy
import imghdr
import shutil
import time
import uuid
from glob import glob
from pathlib import Path
from PIL import Image, UnidentifiedImageError
from dotenv import load_dotenv
from loguru import logger
from add_and_sync_new_project import add_new_project, \
add_and_sync_data_storage
from utils import catch_keyboard_interrupt
class MissingArgument(Exception):
pass
class WatchDog:
def __init__(self, root_data_folder, images_per_folder=1000, debug=False):
self.root_data_folder = root_data_folder
self.images_per_folder = images_per_folder
self.debug = debug
@staticmethod
def create_dummy_data():
dummy_projects = ['project-1000', 'project-1001', 'MOVE_ME']
for project in dummy_projects:
num_dummies = 1000
proj_folder = Path(f'dummy/{project}')
if project == dummy_projects[-2]:
num_dummies = 200
elif project == 'MOVE_ME':
num_dummies = 3600
proj_folder = Path(project)
proj_folder.mkdir(exist_ok=True, parents=True)
for _ in range(num_dummies):
fname = str(uuid.uuid4())
Path(proj_folder /
Path(f'fake_{str(fname).zfill(4)}.jpg')).touch()
logger.debug('Created dummy data')
def validate_image_file(self, file: str) -> str:
if self.debug:
return file
try:
if imghdr.what(file) and Image.open(file):
return file
except UnidentifiedImageError:
time.sleep(1)
if imghdr.what(file) and Image.open(file):
return file
else:
logger.warning(f'`{file}` is corrupted!')
shutil.move(
file,
f'{Path(self.root_data_folder).parent}/data_corrupted')
def generate_next_folder_name(self) -> str:
project_folders = sorted(glob(f'{self.root_data_folder}/project-*'))
num = str(int(project_folders[-1].split('project-')[-1]) + 1).zfill(4)
Path(f'{self.root_data_folder}/project-{num}').mkdir()
return f'{self.root_data_folder}/project-{num}'
def refresh_source(self) -> tuple:
folders = glob(f'{self.root_data_folder}/*')
project_folders = glob(f'{self.root_data_folder}/project-*')
new_folders = list(set(folders).difference(project_folders))
if new_folders:
logger.debug(f'New folder(s) detected: {new_folders}')
new_files = []
for new_folder in new_folders:
cur_files = [
x for x in glob(f'{new_folder}/**/*', recursive=True)
if not Path(x).is_dir()
]
if cur_files:
new_files.append(cur_files)
new_files = sum(new_files, [])
return project_folders, new_folders, new_files
def arrange_new_data_files(self) -> None:
Path(f'{Path(self.root_data_folder).parent}/data_corrupted').mkdir(
exist_ok=True)
project_folders, new_folders, new_files = self.refresh_source()
not_filled_folders = []
project_folders = sorted(glob(f'{self.root_data_folder}/project-*'))
for folder in project_folders:
folder_size = len(glob(f'{folder}/*'))
if self.images_per_folder > folder_size:
not_filled_folders.append((folder, folder_size))
logger.debug(f'Not filled: {folder}, size: {folder_size}')
if not_filled_folders:
for folder, folder_size in not_filled_folders:
for file in new_files:
if self.validate_image_file(file):
try:
shutil.move(file, folder)
except shutil.Error:
fp = Path(file)
rand_str = str(uuid.uuid4()).split('-')[-1]
dest = f'{folder}/{fp.stem}_{rand_str}{fp.suffix}'
shutil.move(file, dest)
if len(glob(f'{folder}/*')) == 1000:
break
project_folders, new_folders, new_files = self.refresh_source()
chunks = [
new_files[i:i + self.images_per_folder]
for i in range(0, len(new_files), self.images_per_folder)
]
for chunk in chunks:
dst = self.generate_next_folder_name()
folder_name = Path(dst).name
for file in chunk:
if self.validate_image_file(file):
shutil.move(file, dst)
if not self.debug:
new_project = add_new_project(folder_name)
_ = add_and_sync_data_storage(new_project['id'],
new_project['title'])
for empty_folder in new_folders:
contains_any_file = [
x for x in glob(f'{empty_folder}/**/*', recursive=True)
if not Path(x).is_dir()
]
if not contains_any_file:
shutil.rmtree(empty_folder)
def watch(self):
catch_keyboard_interrupt()
if self.debug:
self.root_data_folder = 'dummy'
self.create_dummy_data()
else:
Path(f'{self.root_data_folder}/project-0001').mkdir(exist_ok=True)
logger.debug('Started watchdog...')
global_state = glob(f'{self.root_data_folder}/**/*', recursive=True)
while True:
local_state = glob(f'{self.root_data_folder}/**/*', recursive=True)
if global_state != local_state:
logger.debug('Detected change!')
global_state = copy.deepcopy(local_state)
trigger_file = f'{self.root_data_folder}/init'
if Path(trigger_file).exists():
logger.debug('Triggered manually...')
Path(trigger_file).unlink()
self.arrange_new_data_files()
time.sleep(60)
if __name__ == '__main__':
load_dotenv()
parser = argparse.ArgumentParser()
parser.add_argument('--root-data-folder',
help='Path to the folder where all the data is kept',
type=str)
parser.add_argument('--debug', help='Debug mode', action='store_true')
parser.add_argument('--images-per-folder',
help='Number of images per folder',
type=int,
default=1000)
args = parser.parse_args()
if not args.root_data_folder and not args.debug:
raise MissingArgument(
'`--root_data_folder` is required when not in debug mode!')
watch_dog = WatchDog(root_data_folder=args.root_data_folder,
images_per_folder=args.images_per_folder,
debug=args.debug)
watch_dog.watch()
|
en
| 0.325294
|
#!/usr/bin/env python # coding: utf-8
| 2.155038
| 2
|
util.py
|
pl8848/jd-assistant
| 0
|
6625914
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import functools
import json
import os
import re
import warnings
from base64 import b64encode
import requests
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from log import logger
RSA_PUBLIC_KEY = """-----<KEY>
-----END PUBLIC KEY-----"""
#USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
USER_AGENT = 'Mozilla/6.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3359.181 Safari/537.36'
DEFAULT_EID = 'D5GZVU5ZO5VBUFMLOUHMNHK2BXXVKI4ZQK3JKCOIB4PRERKTQXV3BNSG557BQLPVVT4ZN3NKVSXAKTVPJXDEPEBDGU'
DEFAULT_FP = '18c7d83a053e6bbb51f755aea595bbb8'
DEFAULT_TRACK_ID = '9643cbd55bbbe103eef1<PASSWORD>'
def encrypt_pwd(password, public_key=RSA_PUBLIC_KEY):
rsa_key = RSA.importKey(public_key)
encryptor = Cipher_pkcs1_v1_5.new(rsa_key)
cipher = b64encode(encryptor.encrypt(password.encode('utf-8')))
return cipher.decode('utf-8')
def encrypt_payment_pwd(payment_pwd):
return ''.join(['u3' + x for x in payment_pwd])
def response_status(resp):
if resp.status_code != requests.codes.OK:
print('Status: %u, Url: %s' % (resp.status_code, resp.url))
return False
return True
def open_image(image_file):
if os.name == "nt":
os.system('start ' + image_file) # for Windows
else:
if os.uname()[0] == "Linux":
os.system("eog " + image_file) # for Linux
else:
os.system("open " + image_file) # for Mac
def save_image(resp, image_file):
with open(image_file, 'wb') as f:
for chunk in resp.iter_content(chunk_size=1024):
f.write(chunk)
def parse_json(s):
begin = s.find('{')
end = s.rfind('}') + 1
return json.loads(s[begin:end])
def get_tag_value(tag, key='', index=0):
if key:
value = tag[index].get(key)
else:
value = tag[index].text
return value.strip(' \t\r\n')
def parse_items_dict(d):
result = ''
for index, key in enumerate(d):
if index < len(d) - 1:
result = result + '{0} x {1}, '.format(key, d[key])
else:
result = result + '{0} x {1}'.format(key, d[key])
return result
def parse_sku_id(sku_ids):
"""将商品id字符串解析为字典
商品id字符串采用英文逗号进行分割。
可以在每个id后面用冒号加上数字,代表该商品的数量,如果不加数量则默认为1。
例如:
输入 --> 解析结果
'123456' --> {'123456': '1'}
'123456,123789' --> {'123456': '1', '123789': '1'}
'123456:1,123789:3' --> {'123456': '1', '123789': '3'}
'123456:2,123789' --> {'123456': '2', '123789': '1'}
:param sku_ids: 商品id字符串
:return: dict
"""
if isinstance(sku_ids, dict): # 防止重复解析
return sku_ids
sku_id_list = list(filter(bool, map(lambda x: x.strip(), sku_ids.split(','))))
result = dict()
for item in sku_id_list:
if ':' in item:
sku_id, count = map(lambda x: x.strip(), item.split(':'))
result[sku_id] = count
else:
result[item] = '1'
return result
def parse_area_id(area):
"""解析地区id字符串:将分隔符替换为下划线 _
:param area: 地区id字符串(使用 _ 或 - 进行分割),如 12_904_3375 或 12-904-3375
:return: 解析后字符串
"""
return area.replace('-', '_')
def split_area_id(area_id):
"""将地区id字符串按照下划线进行切割,构成数组。数组长度不满4位则用'0'进行填充。
:param area_id: 地区id字符串(使用 _ 或 - 进行分割),如 12_904_3375 或 12-904-3375
:return: list
"""
area = re.split('_|-', area_id)
area.extend((4 - len(area)) * ['0'])
return area
def deprecated(func):
"""This decorator is used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
def check_login(func):
"""用户登陆态校验装饰器。若用户未登陆,则调用扫码登陆"""
@functools.wraps(func)
def new_func(self, *args, **kwargs):
if not self.is_login:
logger.info("{0} 需登陆后调用,开始扫码登陆".format(func.__name__))
self.login_by_QRcode()
return func(self, *args, **kwargs)
return new_func
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import functools
import json
import os
import re
import warnings
from base64 import b64encode
import requests
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from log import logger
RSA_PUBLIC_KEY = """-----<KEY>
-----END PUBLIC KEY-----"""
#USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
USER_AGENT = 'Mozilla/6.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3359.181 Safari/537.36'
DEFAULT_EID = 'D5GZVU5ZO5VBUFMLOUHMNHK2BXXVKI4ZQK3JKCOIB4PRERKTQXV3BNSG557BQLPVVT4ZN3NKVSXAKTVPJXDEPEBDGU'
DEFAULT_FP = '18c7d83a053e6bbb51f755aea595bbb8'
DEFAULT_TRACK_ID = '9643cbd55bbbe103eef1<PASSWORD>'
def encrypt_pwd(password, public_key=RSA_PUBLIC_KEY):
rsa_key = RSA.importKey(public_key)
encryptor = Cipher_pkcs1_v1_5.new(rsa_key)
cipher = b64encode(encryptor.encrypt(password.encode('utf-8')))
return cipher.decode('utf-8')
def encrypt_payment_pwd(payment_pwd):
return ''.join(['u3' + x for x in payment_pwd])
def response_status(resp):
if resp.status_code != requests.codes.OK:
print('Status: %u, Url: %s' % (resp.status_code, resp.url))
return False
return True
def open_image(image_file):
if os.name == "nt":
os.system('start ' + image_file) # for Windows
else:
if os.uname()[0] == "Linux":
os.system("eog " + image_file) # for Linux
else:
os.system("open " + image_file) # for Mac
def save_image(resp, image_file):
with open(image_file, 'wb') as f:
for chunk in resp.iter_content(chunk_size=1024):
f.write(chunk)
def parse_json(s):
begin = s.find('{')
end = s.rfind('}') + 1
return json.loads(s[begin:end])
def get_tag_value(tag, key='', index=0):
if key:
value = tag[index].get(key)
else:
value = tag[index].text
return value.strip(' \t\r\n')
def parse_items_dict(d):
result = ''
for index, key in enumerate(d):
if index < len(d) - 1:
result = result + '{0} x {1}, '.format(key, d[key])
else:
result = result + '{0} x {1}'.format(key, d[key])
return result
def parse_sku_id(sku_ids):
"""将商品id字符串解析为字典
商品id字符串采用英文逗号进行分割。
可以在每个id后面用冒号加上数字,代表该商品的数量,如果不加数量则默认为1。
例如:
输入 --> 解析结果
'123456' --> {'123456': '1'}
'123456,123789' --> {'123456': '1', '123789': '1'}
'123456:1,123789:3' --> {'123456': '1', '123789': '3'}
'123456:2,123789' --> {'123456': '2', '123789': '1'}
:param sku_ids: 商品id字符串
:return: dict
"""
if isinstance(sku_ids, dict): # 防止重复解析
return sku_ids
sku_id_list = list(filter(bool, map(lambda x: x.strip(), sku_ids.split(','))))
result = dict()
for item in sku_id_list:
if ':' in item:
sku_id, count = map(lambda x: x.strip(), item.split(':'))
result[sku_id] = count
else:
result[item] = '1'
return result
def parse_area_id(area):
"""解析地区id字符串:将分隔符替换为下划线 _
:param area: 地区id字符串(使用 _ 或 - 进行分割),如 12_904_3375 或 12-904-3375
:return: 解析后字符串
"""
return area.replace('-', '_')
def split_area_id(area_id):
"""将地区id字符串按照下划线进行切割,构成数组。数组长度不满4位则用'0'进行填充。
:param area_id: 地区id字符串(使用 _ 或 - 进行分割),如 12_904_3375 或 12-904-3375
:return: list
"""
area = re.split('_|-', area_id)
area.extend((4 - len(area)) * ['0'])
return area
def deprecated(func):
"""This decorator is used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
def check_login(func):
"""用户登陆态校验装饰器。若用户未登陆,则调用扫码登陆"""
@functools.wraps(func)
def new_func(self, *args, **kwargs):
if not self.is_login:
logger.info("{0} 需登陆后调用,开始扫码登陆".format(func.__name__))
self.login_by_QRcode()
return func(self, *args, **kwargs)
return new_func
|
zh
| 0.438102
|
#!/usr/bin/env python # -*- coding:utf-8 -*- -----<KEY> -----END PUBLIC KEY----- #USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36' # for Windows # for Linux # for Mac 将商品id字符串解析为字典 商品id字符串采用英文逗号进行分割。 可以在每个id后面用冒号加上数字,代表该商品的数量,如果不加数量则默认为1。 例如: 输入 --> 解析结果 '123456' --> {'123456': '1'} '123456,123789' --> {'123456': '1', '123789': '1'} '123456:1,123789:3' --> {'123456': '1', '123789': '3'} '123456:2,123789' --> {'123456': '2', '123789': '1'} :param sku_ids: 商品id字符串 :return: dict # 防止重复解析 解析地区id字符串:将分隔符替换为下划线 _ :param area: 地区id字符串(使用 _ 或 - 进行分割),如 12_904_3375 或 12-904-3375 :return: 解析后字符串 将地区id字符串按照下划线进行切割,构成数组。数组长度不满4位则用'0'进行填充。 :param area_id: 地区id字符串(使用 _ 或 - 进行分割),如 12_904_3375 或 12-904-3375 :return: list This decorator is used to mark functions as deprecated. It will result in a warning being emitted when the function is used. # turn off filter # reset filter 用户登陆态校验装饰器。若用户未登陆,则调用扫码登陆
| 2.331229
| 2
|
tools/envoy_headersplit/headersplit_test.py
|
vazra/envoy
| 2
|
6625915
|
# Lint as: python3
"""Tests for headersplit."""
import headersplit
import unittest
# libclang imports
from clang.cindex import TranslationUnit, Index
class HeadersplitTest(unittest.TestCase):
# A header contains a simple class print hello world
source_code_hello_world = open("tools/envoy_headersplit/code_corpus/hello.h", "r").read()
# A C++ source code contains definition for several classes
source_class_defn = open("tools/envoy_headersplit/code_corpus/class_defn.h", "r").read()
# almost the same as above, but classes are not enclosed by namespace
source_class_defn_without_namespace = open(
"tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h", "r").read()
# A C++ source code contains method implementations for class_defn.h
source_class_impl = open("tools/envoy_headersplit/code_corpus/class_impl.cc", "r").read()
def test_to_filename(self):
# Test class name with one "mock"
self.assertEqual(headersplit.to_filename("MockAdminStream"), "admin_stream")
# Test class name with two "Mock"
self.assertEqual(
headersplit.to_filename("MockClusterMockPrioritySet"), "cluster_mock_priority_set")
# Test class name with no "Mock"
self.assertEqual(
headersplit.to_filename("TestRetryHostPredicateFactory"),
"test_retry_host_predicate_factory")
def test_get_directives(self):
includes = """// your first c++ program
// NOLINT(namespace-envoy)
#include <iostream>
// random strings
#include "foo/bar"
"""
translation_unit_hello_world = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/hello.h",
options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)
self.assertEqual(headersplit.get_directives(translation_unit_hello_world), includes)
def test_class_definitions(self):
idx = Index.create()
translation_unit_class_defn = idx.parse(
"tools/envoy_headersplit/code_corpus/class_defn.h", ["-x", "c++"])
defns_cursors = headersplit.class_definitions(translation_unit_class_defn.cursor)
defns_names = [cursor.spelling for cursor in defns_cursors]
self.assertEqual(defns_names, ["Foo", "Bar", "FooBar", "DeadBeaf"])
idx = Index.create()
translation_unit_class_defn = idx.parse(
"tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h", ["-x", "c++"])
defns_cursors = headersplit.class_definitions(translation_unit_class_defn.cursor)
defns_names = [cursor.spelling for cursor in defns_cursors]
self.assertEqual(defns_names, [])
def test_class_implementations(self):
translation_unit_class_impl = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/class_impl.cc",
options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)
impls_cursors = headersplit.class_implementations(translation_unit_class_impl.cursor)
impls_names = [cursor.spelling for cursor in impls_cursors]
self.assertEqual(impls_names, ["getFoo", "val", "DeadBeaf"])
def test_class_implementations_error(self):
# LibClang will fail in parse this source file (it's modified from the original
# test/server/mocks.cc from Envoy repository) if we don't add flag PARSE_SKIP_FUNCTION_BODIES
# to ignore function bodies.
impl_translation_unit = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/fail_mocks.cc")
impls_cursors = headersplit.class_implementations(impl_translation_unit.cursor)
# impls_name is not complete in this case
impls_names = [cursor.spelling for cursor in impls_cursors]
# LibClang will stop parsing at
# MockListenerComponentFactory::MockListenerComponentFactory()
# : socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()) {
# ^
# Since parsing stops early, we will have incomplete method list.
# The reason is not clear, however, this issue can be addressed by adding parsing flag to
# ignore function body
# get correct list of member methods
impl_translation_unit_correct = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/fail_mocks.cc",
options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)
impls_cursors_correct = headersplit.class_implementations(
impl_translation_unit_correct.cursor)
impls_names_correct = [cursor.spelling for cursor in impls_cursors_correct]
self.assertNotEqual(impls_names, impls_names_correct)
if __name__ == "__main__":
unittest.main()
|
# Lint as: python3
"""Tests for headersplit."""
import headersplit
import unittest
# libclang imports
from clang.cindex import TranslationUnit, Index
class HeadersplitTest(unittest.TestCase):
# A header contains a simple class print hello world
source_code_hello_world = open("tools/envoy_headersplit/code_corpus/hello.h", "r").read()
# A C++ source code contains definition for several classes
source_class_defn = open("tools/envoy_headersplit/code_corpus/class_defn.h", "r").read()
# almost the same as above, but classes are not enclosed by namespace
source_class_defn_without_namespace = open(
"tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h", "r").read()
# A C++ source code contains method implementations for class_defn.h
source_class_impl = open("tools/envoy_headersplit/code_corpus/class_impl.cc", "r").read()
def test_to_filename(self):
# Test class name with one "mock"
self.assertEqual(headersplit.to_filename("MockAdminStream"), "admin_stream")
# Test class name with two "Mock"
self.assertEqual(
headersplit.to_filename("MockClusterMockPrioritySet"), "cluster_mock_priority_set")
# Test class name with no "Mock"
self.assertEqual(
headersplit.to_filename("TestRetryHostPredicateFactory"),
"test_retry_host_predicate_factory")
def test_get_directives(self):
includes = """// your first c++ program
// NOLINT(namespace-envoy)
#include <iostream>
// random strings
#include "foo/bar"
"""
translation_unit_hello_world = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/hello.h",
options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)
self.assertEqual(headersplit.get_directives(translation_unit_hello_world), includes)
def test_class_definitions(self):
idx = Index.create()
translation_unit_class_defn = idx.parse(
"tools/envoy_headersplit/code_corpus/class_defn.h", ["-x", "c++"])
defns_cursors = headersplit.class_definitions(translation_unit_class_defn.cursor)
defns_names = [cursor.spelling for cursor in defns_cursors]
self.assertEqual(defns_names, ["Foo", "Bar", "FooBar", "DeadBeaf"])
idx = Index.create()
translation_unit_class_defn = idx.parse(
"tools/envoy_headersplit/code_corpus/class_defn_without_namespace.h", ["-x", "c++"])
defns_cursors = headersplit.class_definitions(translation_unit_class_defn.cursor)
defns_names = [cursor.spelling for cursor in defns_cursors]
self.assertEqual(defns_names, [])
def test_class_implementations(self):
translation_unit_class_impl = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/class_impl.cc",
options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)
impls_cursors = headersplit.class_implementations(translation_unit_class_impl.cursor)
impls_names = [cursor.spelling for cursor in impls_cursors]
self.assertEqual(impls_names, ["getFoo", "val", "DeadBeaf"])
def test_class_implementations_error(self):
# LibClang will fail in parse this source file (it's modified from the original
# test/server/mocks.cc from Envoy repository) if we don't add flag PARSE_SKIP_FUNCTION_BODIES
# to ignore function bodies.
impl_translation_unit = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/fail_mocks.cc")
impls_cursors = headersplit.class_implementations(impl_translation_unit.cursor)
# impls_name is not complete in this case
impls_names = [cursor.spelling for cursor in impls_cursors]
# LibClang will stop parsing at
# MockListenerComponentFactory::MockListenerComponentFactory()
# : socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()) {
# ^
# Since parsing stops early, we will have incomplete method list.
# The reason is not clear, however, this issue can be addressed by adding parsing flag to
# ignore function body
# get correct list of member methods
impl_translation_unit_correct = TranslationUnit.from_source(
"tools/envoy_headersplit/code_corpus/fail_mocks.cc",
options=TranslationUnit.PARSE_SKIP_FUNCTION_BODIES)
impls_cursors_correct = headersplit.class_implementations(
impl_translation_unit_correct.cursor)
impls_names_correct = [cursor.spelling for cursor in impls_cursors_correct]
self.assertNotEqual(impls_names, impls_names_correct)
if __name__ == "__main__":
unittest.main()
|
en
| 0.756739
|
# Lint as: python3 Tests for headersplit. # libclang imports # A header contains a simple class print hello world # A C++ source code contains definition for several classes # almost the same as above, but classes are not enclosed by namespace # A C++ source code contains method implementations for class_defn.h # Test class name with one "mock" # Test class name with two "Mock" # Test class name with no "Mock" // your first c++ program // NOLINT(namespace-envoy) #include <iostream> // random strings #include "foo/bar" # LibClang will fail in parse this source file (it's modified from the original # test/server/mocks.cc from Envoy repository) if we don't add flag PARSE_SKIP_FUNCTION_BODIES # to ignore function bodies. # impls_name is not complete in this case # LibClang will stop parsing at # MockListenerComponentFactory::MockListenerComponentFactory() # : socket_(std::make_shared<NiceMock<Network::MockListenSocket>>()) { # ^ # Since parsing stops early, we will have incomplete method list. # The reason is not clear, however, this issue can be addressed by adding parsing flag to # ignore function body # get correct list of member methods
| 2.420217
| 2
|
labm8/py/dockerutil.py
|
ChrisCummins/labm8
| 3
|
6625916
|
"""A module for launching docker images from within python applications."""
import contextlib
import pathlib
import random
import subprocess
import typing
from labm8.py import app
from labm8.py import bazelutil
from labm8.py import labtypes
def IsDockerContainer() -> bool:
"""Determine if running inside a docker container."""
return pathlib.Path("/.dockerenv").is_file()
def _Docker(cmd: typing.List[str], timeout: int = 60):
"""Build a docker process invocation."""
cmd = ["timeout", "-s9", str(timeout), "docker"] + [str(s) for s in cmd]
app.Log(2, "$ %s", " ".join(cmd))
return cmd
class DockerImageRunContext(object):
"""A transient context for running docker images."""
def __init__(self, image_name: str):
self.image_name = image_name
def _CommandLineInvocation(
self,
args: typing.List[str],
flags: typing.Dict[str, str],
volumes: typing.Dict[typing.Union[str, pathlib.Path], str],
timeout: int,
entrypoint: typing.Optional[str],
) -> typing.List[str]:
"""Build the command line arguments to execute the requested command.
Args:
args: A list of string positional arguments to pass to the docker image.
flags: A map of flag arguments. The keys are prefixed with '--' and
concatenated with the keys to produce arguments. E.g. {"foo": 2}
equates to ["--foo", "2"].
volumes: A map of shared volumes, e.g. {"/tmp": "/foo"} equates to
the argument "-v/tmp:/foo". It is the responsibility of the calling
code to ensure that the host paths are accessible to docker, and have
the appropriate permissions for the docker user to access / modify. See
//labm8/py:dockerutil_test for an example.
timeout: The number of seconds to allow the image to run for before being
killed. Killed processes will exit with returncode 9.
entrypoint: An optional entrypoint for the docker image, equivalent to
["--entrypoint", entrypoint].
Returns:
The command line as a list of strings.
"""
entrypoint_args = ["--entrypoint", entrypoint] if entrypoint else []
volume_args = [f"-v{src}:{dst}" for src, dst in (volumes or {}).items()]
flags_args = labtypes.flatten(
[[f"--{k}", str(v)] for k, v in (flags or {}).items()],
)
return _Docker(
["run", "--rm"]
+ entrypoint_args
+ volume_args
+ [self.image_name]
+ args
+ flags_args,
timeout,
)
def CheckCall(
self,
args: typing.List[str],
flags: typing.Dict[str, str] = None,
volumes: typing.Dict[typing.Union[str, pathlib.Path], str] = None,
timeout: int = 600,
entrypoint: str = None,
) -> None:
"""Run the docker image with specified args.
This attempts to emulate the behavior of subproces.check_call() for
docker images.
See _CommandLineInvocation() for details on args.
"""
cmd = self._CommandLineInvocation(args, flags, volumes, timeout, entrypoint)
subprocess.check_call(cmd)
def CheckOutput(
self,
args: typing.List[str],
flags: typing.Dict[str, str] = None,
volumes: typing.Dict[typing.Union[str, pathlib.Path], str] = None,
timeout: int = 600,
entrypoint: str = None,
) -> str:
"""Run the docker image with specified args and return its output.
This attempts to emulate the behavior of subproces.check_output() for
docker images.
See _CommandLineInvocation() for details on args.
"""
cmd = self._CommandLineInvocation(args, flags, volumes, timeout, entrypoint)
return subprocess.check_output(cmd, universal_newlines=True)
class BazelPy3Image(object):
"""A docker image created using bazel's py3_image() rule.
To use a py3_image with this class, add the py3_image target with a ".tar"
suffix as a data dependency to the bazel target, e.g.
load("@io_bazel_rules_docker//python3:image.bzl", "py3_image")
py3_image(
name = "my_image",
srcs = ["my_image.py"],
)
py_binary(
name = "my_app",
srcs = ["my_app.py"],
data = [
":my_image.tar",
],
deps = [
"//labm8/py:app",
"//labm8/py:dockerutil",
],
)
"""
def __init__(self, data_path: str):
"""Constructor.
Args:
path: The path to the data, including the name of the workspace.
Raises:
FileNotFoundError: If path is not a file.
"""
super(BazelPy3Image, self).__init__()
self.data_path = data_path
self.tar_path = bazelutil.DataPath(f"phd/{data_path}.tar")
components = self.data_path.split("/")
self.image_name = f'bazel/{"/".join(components[:-1])}:{components[-1]}'
def _TemporaryImageName(self) -> str:
basename = self.data_path.split("/")[-1]
random_suffix = "".join(
random.choice("0123456789abcdef") for _ in range(32)
)
return f"phd_{basename}_tmp_{random_suffix}"
@contextlib.contextmanager
def RunContext(self) -> DockerImageRunContext:
subprocess.check_call(
_Docker(["load", "-i", str(self.tar_path)], timeout=600),
)
tmp_name = self._TemporaryImageName()
subprocess.check_call(
_Docker(["tag", self.image_name, tmp_name], timeout=60),
)
subprocess.check_call(
_Docker(["rmi", "--force", self.image_name], timeout=60)
)
yield DockerImageRunContext(tmp_name)
# FIXME(cec): Using the --force flag here is almost certainly the wrong
# thing, but I'm getting strange errors when trying to untag the image
# otherwise:
# Error response from daemon: conflict: unable to remove repository
# reference "phd_..." (must force) - container ... is using its
# referenced image ...
subprocess.check_call(_Docker(["rmi", "--force", tmp_name], timeout=60))
|
"""A module for launching docker images from within python applications."""
import contextlib
import pathlib
import random
import subprocess
import typing
from labm8.py import app
from labm8.py import bazelutil
from labm8.py import labtypes
def IsDockerContainer() -> bool:
"""Determine if running inside a docker container."""
return pathlib.Path("/.dockerenv").is_file()
def _Docker(cmd: typing.List[str], timeout: int = 60):
"""Build a docker process invocation."""
cmd = ["timeout", "-s9", str(timeout), "docker"] + [str(s) for s in cmd]
app.Log(2, "$ %s", " ".join(cmd))
return cmd
class DockerImageRunContext(object):
"""A transient context for running docker images."""
def __init__(self, image_name: str):
self.image_name = image_name
def _CommandLineInvocation(
self,
args: typing.List[str],
flags: typing.Dict[str, str],
volumes: typing.Dict[typing.Union[str, pathlib.Path], str],
timeout: int,
entrypoint: typing.Optional[str],
) -> typing.List[str]:
"""Build the command line arguments to execute the requested command.
Args:
args: A list of string positional arguments to pass to the docker image.
flags: A map of flag arguments. The keys are prefixed with '--' and
concatenated with the keys to produce arguments. E.g. {"foo": 2}
equates to ["--foo", "2"].
volumes: A map of shared volumes, e.g. {"/tmp": "/foo"} equates to
the argument "-v/tmp:/foo". It is the responsibility of the calling
code to ensure that the host paths are accessible to docker, and have
the appropriate permissions for the docker user to access / modify. See
//labm8/py:dockerutil_test for an example.
timeout: The number of seconds to allow the image to run for before being
killed. Killed processes will exit with returncode 9.
entrypoint: An optional entrypoint for the docker image, equivalent to
["--entrypoint", entrypoint].
Returns:
The command line as a list of strings.
"""
entrypoint_args = ["--entrypoint", entrypoint] if entrypoint else []
volume_args = [f"-v{src}:{dst}" for src, dst in (volumes or {}).items()]
flags_args = labtypes.flatten(
[[f"--{k}", str(v)] for k, v in (flags or {}).items()],
)
return _Docker(
["run", "--rm"]
+ entrypoint_args
+ volume_args
+ [self.image_name]
+ args
+ flags_args,
timeout,
)
def CheckCall(
self,
args: typing.List[str],
flags: typing.Dict[str, str] = None,
volumes: typing.Dict[typing.Union[str, pathlib.Path], str] = None,
timeout: int = 600,
entrypoint: str = None,
) -> None:
"""Run the docker image with specified args.
This attempts to emulate the behavior of subproces.check_call() for
docker images.
See _CommandLineInvocation() for details on args.
"""
cmd = self._CommandLineInvocation(args, flags, volumes, timeout, entrypoint)
subprocess.check_call(cmd)
def CheckOutput(
self,
args: typing.List[str],
flags: typing.Dict[str, str] = None,
volumes: typing.Dict[typing.Union[str, pathlib.Path], str] = None,
timeout: int = 600,
entrypoint: str = None,
) -> str:
"""Run the docker image with specified args and return its output.
This attempts to emulate the behavior of subproces.check_output() for
docker images.
See _CommandLineInvocation() for details on args.
"""
cmd = self._CommandLineInvocation(args, flags, volumes, timeout, entrypoint)
return subprocess.check_output(cmd, universal_newlines=True)
class BazelPy3Image(object):
"""A docker image created using bazel's py3_image() rule.
To use a py3_image with this class, add the py3_image target with a ".tar"
suffix as a data dependency to the bazel target, e.g.
load("@io_bazel_rules_docker//python3:image.bzl", "py3_image")
py3_image(
name = "my_image",
srcs = ["my_image.py"],
)
py_binary(
name = "my_app",
srcs = ["my_app.py"],
data = [
":my_image.tar",
],
deps = [
"//labm8/py:app",
"//labm8/py:dockerutil",
],
)
"""
def __init__(self, data_path: str):
"""Constructor.
Args:
path: The path to the data, including the name of the workspace.
Raises:
FileNotFoundError: If path is not a file.
"""
super(BazelPy3Image, self).__init__()
self.data_path = data_path
self.tar_path = bazelutil.DataPath(f"phd/{data_path}.tar")
components = self.data_path.split("/")
self.image_name = f'bazel/{"/".join(components[:-1])}:{components[-1]}'
def _TemporaryImageName(self) -> str:
basename = self.data_path.split("/")[-1]
random_suffix = "".join(
random.choice("0123456789abcdef") for _ in range(32)
)
return f"phd_{basename}_tmp_{random_suffix}"
@contextlib.contextmanager
def RunContext(self) -> DockerImageRunContext:
subprocess.check_call(
_Docker(["load", "-i", str(self.tar_path)], timeout=600),
)
tmp_name = self._TemporaryImageName()
subprocess.check_call(
_Docker(["tag", self.image_name, tmp_name], timeout=60),
)
subprocess.check_call(
_Docker(["rmi", "--force", self.image_name], timeout=60)
)
yield DockerImageRunContext(tmp_name)
# FIXME(cec): Using the --force flag here is almost certainly the wrong
# thing, but I'm getting strange errors when trying to untag the image
# otherwise:
# Error response from daemon: conflict: unable to remove repository
# reference "phd_..." (must force) - container ... is using its
# referenced image ...
subprocess.check_call(_Docker(["rmi", "--force", tmp_name], timeout=60))
|
en
| 0.744011
|
A module for launching docker images from within python applications. Determine if running inside a docker container. Build a docker process invocation. A transient context for running docker images. Build the command line arguments to execute the requested command. Args: args: A list of string positional arguments to pass to the docker image. flags: A map of flag arguments. The keys are prefixed with '--' and concatenated with the keys to produce arguments. E.g. {"foo": 2} equates to ["--foo", "2"]. volumes: A map of shared volumes, e.g. {"/tmp": "/foo"} equates to the argument "-v/tmp:/foo". It is the responsibility of the calling code to ensure that the host paths are accessible to docker, and have the appropriate permissions for the docker user to access / modify. See //labm8/py:dockerutil_test for an example. timeout: The number of seconds to allow the image to run for before being killed. Killed processes will exit with returncode 9. entrypoint: An optional entrypoint for the docker image, equivalent to ["--entrypoint", entrypoint]. Returns: The command line as a list of strings. Run the docker image with specified args. This attempts to emulate the behavior of subproces.check_call() for docker images. See _CommandLineInvocation() for details on args. Run the docker image with specified args and return its output. This attempts to emulate the behavior of subproces.check_output() for docker images. See _CommandLineInvocation() for details on args. A docker image created using bazel's py3_image() rule. To use a py3_image with this class, add the py3_image target with a ".tar" suffix as a data dependency to the bazel target, e.g. load("@io_bazel_rules_docker//python3:image.bzl", "py3_image") py3_image( name = "my_image", srcs = ["my_image.py"], ) py_binary( name = "my_app", srcs = ["my_app.py"], data = [ ":my_image.tar", ], deps = [ "//labm8/py:app", "//labm8/py:dockerutil", ], ) Constructor. Args: path: The path to the data, including the name of the workspace. 
Raises: FileNotFoundError: If path is not a file. # FIXME(cec): Using the --force flag here is almost certainly the wrong # thing, but I'm getting strange errors when trying to untag the image # otherwise: # Error response from daemon: conflict: unable to remove repository # reference "phd_..." (must force) - container ... is using its # referenced image ...
| 3.005674
| 3
|
bazel/python_toolchain.bzl
|
khansiddiquekc/magma
| 539
|
6625917
|
# Copyright 2021 The Magma Authors.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python toolchain configuration"""
load("@rules_python//python:defs.bzl", "py_runtime", "py_runtime_pair")
def configure_python_toolchain(name = None):
py_runtime(
name = "python3",
interpreter_path = "/usr/bin/python3.8",
python_version = "PY3",
visibility = ["//visibility:public"],
)
py_runtime_pair(
name = "py_runtime_pair",
py2_runtime = None,
py3_runtime = ":python3",
visibility = ["//visibility:public"],
)
native.toolchain(
name = "py_toolchain",
toolchain = ":py_runtime_pair",
toolchain_type = "@bazel_tools//tools/python:toolchain_type",
visibility = ["//visibility:public"],
)
|
# Copyright 2021 The Magma Authors.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python toolchain configuration"""
load("@rules_python//python:defs.bzl", "py_runtime", "py_runtime_pair")
def configure_python_toolchain(name = None):
py_runtime(
name = "python3",
interpreter_path = "/usr/bin/python3.8",
python_version = "PY3",
visibility = ["//visibility:public"],
)
py_runtime_pair(
name = "py_runtime_pair",
py2_runtime = None,
py3_runtime = ":python3",
visibility = ["//visibility:public"],
)
native.toolchain(
name = "py_toolchain",
toolchain = ":py_runtime_pair",
toolchain_type = "@bazel_tools//tools/python:toolchain_type",
visibility = ["//visibility:public"],
)
|
en
| 0.863951
|
# Copyright 2021 The Magma Authors. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Python toolchain configuration
| 1.480795
| 1
|
delete.py
|
Max00355/ByteMail-1
| 4
|
6625918
|
<gh_stars>1-10
import db
import ssl
import json
from rsa import *
import base64
def delete(obj, ip, data):
message = db.messages.find("messages", {"id":data['id']})
pubkey_expression = db.nodes.find("nodes", {"addr":data['to']})[0]['publickey']
if pubkey_expression.startswith("PublicKey(") and pubkey_expression.endswith(")"):
try:
verify("delete" + data['id'], base64.b64decode(data['signature']),eval(pubkey_expression))
except Exception, error:
return
try:
db.messages.remove("messages", message[0])
except IndexError:
pass
def send_delete(id, addr):
message = db.messages.find("messages", {"id":id})
if not message:
return "Message with that ID doesn't exist."
else:
db.messages.remove("messages", message[0])
nodes = db.nodes.find("nodes", "all")
signature = base64.b64encode(sign("delete" + id, eval(db.data.find("data","all")[0]['privatekey']),"SHA-1"))
for x in nodes:
try:
sock = ssl.socket()
sock.settimeout(1)
sock.connect((x['ip'], x['port']))
sock.send(json.dumps({"cmd":"delete", "to":addr, "id":id, "signature":signature}))
except:
db.unsent.insert("unsent", {"to":[x['ip'], x['port']], "message":{"cmd":"delete", "to":addr, "id":id, "signature":signature}})
sock.close()
return "Message Removed!"
def send_delete_all(addr):
messages = db.messages.find("messages", {"to":addr})
if not messages:
return "No messages to delete!"
else:
for msg in messages:
send_delete(msg['id'],addr)
return "Success!"
|
import db
import ssl
import json
from rsa import *
import base64
def delete(obj, ip, data):
message = db.messages.find("messages", {"id":data['id']})
pubkey_expression = db.nodes.find("nodes", {"addr":data['to']})[0]['publickey']
if pubkey_expression.startswith("PublicKey(") and pubkey_expression.endswith(")"):
try:
verify("delete" + data['id'], base64.b64decode(data['signature']),eval(pubkey_expression))
except Exception, error:
return
try:
db.messages.remove("messages", message[0])
except IndexError:
pass
def send_delete(id, addr):
message = db.messages.find("messages", {"id":id})
if not message:
return "Message with that ID doesn't exist."
else:
db.messages.remove("messages", message[0])
nodes = db.nodes.find("nodes", "all")
signature = base64.b64encode(sign("delete" + id, eval(db.data.find("data","all")[0]['privatekey']),"SHA-1"))
for x in nodes:
try:
sock = ssl.socket()
sock.settimeout(1)
sock.connect((x['ip'], x['port']))
sock.send(json.dumps({"cmd":"delete", "to":addr, "id":id, "signature":signature}))
except:
db.unsent.insert("unsent", {"to":[x['ip'], x['port']], "message":{"cmd":"delete", "to":addr, "id":id, "signature":signature}})
sock.close()
return "Message Removed!"
def send_delete_all(addr):
messages = db.messages.find("messages", {"to":addr})
if not messages:
return "No messages to delete!"
else:
for msg in messages:
send_delete(msg['id'],addr)
return "Success!"
|
none
| 1
| 2.630599
| 3
|
|
isi_mip/pages/migrations/0041_auto_20210525_1051.py
|
ISI-MIP/isimip
| 0
|
6625919
|
<filename>isi_mip/pages/migrations/0041_auto_20210525_1051.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-05-25 08:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0040_paperpage_is_peer_reviewed'),
]
operations = [
migrations.RemoveField(
model_name='paperpage',
name='is_peer_reviewed',
),
migrations.AddField(
model_name='paperpage',
name='is_not_peer_reviewed',
field=models.BooleanField(default=False),
),
]
|
<filename>isi_mip/pages/migrations/0041_auto_20210525_1051.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-05-25 08:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0040_paperpage_is_peer_reviewed'),
]
operations = [
migrations.RemoveField(
model_name='paperpage',
name='is_peer_reviewed',
),
migrations.AddField(
model_name='paperpage',
name='is_not_peer_reviewed',
field=models.BooleanField(default=False),
),
]
|
en
| 0.774162
|
# -*- coding: utf-8 -*- # Generated by Django 1.11.16 on 2021-05-25 08:51
| 1.237673
| 1
|
tensorflow_estimator/python/estimator/tpu/tpu_estimator_integration_test.py
|
pranve/estimator
| 0
|
6625920
|
<filename>tensorflow_estimator/python/estimator/tpu/tpu_estimator_integration_test.py<gh_stars>0
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUEstimator."""
import contextlib
import tempfile
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_estimator
# pylint: enable=g-direct-tensorflow-import
flags.DEFINE_integer('test_num_shards', 8, 'number of replicas to test')
FLAGS = flags.FLAGS
_TRAIN = model_fn_lib.ModeKeys.TRAIN
_EVAL = model_fn_lib.ModeKeys.EVAL
_PREDICT = model_fn_lib.ModeKeys.PREDICT
_PER_HOST = 'per_host_sharding'
_PER_SHARD = 'per_shard_sharding'
_UNSHARDED = 'unsharded'
_INPUT_PIPELINE_WITH_QUEUE_RUNNER = (
'Input pipeline contains one or more QueueRunners')
def dense_computation(features):
return tf.layers.dense(
features['x'], 1, kernel_initializer=tf.zeros_initializer())
def model_fn_global_step_incrementer(features, labels, mode, params):
del params
loss = None
train_op = None
predictions = dense_computation(features)
if mode != _PREDICT:
loss = tf.losses.mean_squared_error(labels, predictions)
optimizer = tf.tpu.CrossShardOptimizer(
tf.train.GradientDescentOptimizer(learning_rate=0.5))
train_op = optimizer.minimize(loss, tf.train.get_global_step())
return tpu_estimator.TPUEstimatorSpec(
mode,
loss=loss,
train_op=train_op,
predictions={'predictions': predictions},
export_outputs={
'test': export_output.PredictOutput({
'prediction': predictions
})
})
def dummy_input_fn_with_dataset(batch_size, repeat=True, x=None):
if x is None:
x = np.random.normal(size=[batch_size, 1]).astype(np.float32)
labels = [[2.0]] * batch_size
dataset1 = tf.data.Dataset.from_tensor_slices(x)
dataset2 = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((dataset1, dataset2))
if repeat:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
def _map(x, y):
return {'x': x}, y
return dataset.map(_map)
def dummy_input_fn(batch_size, repeat=True):
dataset = dummy_input_fn_with_dataset(batch_size, repeat)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
def create_run_config(iterations_per_loop, **kwargs):
return tpu_config.RunConfig(
master='',
tpu_config=tpu_config.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.test_num_shards,
**kwargs),
)
class TPUEstimatorIntegrationTest(tf.test.TestCase):
def setUp(self):
self._recorded_input_fn_invoke_metadata = {
_TRAIN: {'called_count': 0, 'batch_size': None},
_EVAL: {'called_count': 0, 'batch_size': None},
_PREDICT: {'called_count': 0, 'batch_size': None}
}
self._data = np.linspace(0., 1., 100, dtype=np.float32).reshape(-1, 1)
self._export_mode = False
@contextlib.contextmanager
def export_mode(self):
"""Enable the export mode for model_fn."""
# Inside the model_fn, the test will check the batch size passed via params.
# However, export mode should not have that. It is infeasible for model_fn
# to distinguish the predict vs export mode today. So, this contextmanager
# helps the model_fn to do that.
self._export_mode = True
yield
self._export_mode = False
def assertInputFnCalledCountAndBatch(self, expected_called_count,
expected_batch_size):
real_called_count = {k: v['called_count'] for k, v in
self._recorded_input_fn_invoke_metadata.items()}
real_batch_size = {k: v['batch_size'] for k, v in
self._recorded_input_fn_invoke_metadata.items()}
self.assertEqual(expected_called_count, real_called_count)
self.assertEqual(expected_batch_size, real_batch_size)
def _generate_expected_batch_size_and_called_count(
self,
num_shards,
train_batch_size,
eval_batch_size,
predict_batch_size,
train_sharding_policy=_UNSHARDED,
eval_sharding_policy=_UNSHARDED,
predict_sharding_policy=None):
expected_batch_size_for_model_fn = {}
expected_batch_size_for_input_fn = {}
expected_called_count_for_input_fn = {}
if train_sharding_policy == _PER_SHARD:
self.assertEqual(0, train_batch_size % num_shards)
expected_batch_size_for_model_fn[_TRAIN] = train_batch_size // num_shards
expected_batch_size_for_input_fn[_TRAIN] = train_batch_size // num_shards
expected_called_count_for_input_fn[_TRAIN] = num_shards
elif train_sharding_policy == _PER_HOST:
self.assertEqual(0, train_batch_size % num_shards)
expected_batch_size_for_model_fn[_TRAIN] = train_batch_size // num_shards
expected_batch_size_for_input_fn[_TRAIN] = train_batch_size
expected_called_count_for_input_fn[_TRAIN] = 1
else:
expected_batch_size_for_model_fn[_TRAIN] = train_batch_size
expected_batch_size_for_input_fn[_TRAIN] = train_batch_size
expected_called_count_for_input_fn[_TRAIN] = 1
if eval_sharding_policy == _PER_HOST:
self.assertEqual(0, train_batch_size % num_shards)
expected_batch_size_for_model_fn[_EVAL] = eval_batch_size // num_shards
expected_batch_size_for_input_fn[_EVAL] = eval_batch_size
expected_called_count_for_input_fn[_EVAL] = 1
else:
expected_batch_size_for_model_fn[_EVAL] = eval_batch_size
expected_batch_size_for_input_fn[_EVAL] = eval_batch_size
expected_called_count_for_input_fn[_EVAL] = 1
if predict_sharding_policy is None:
# On CPU.
expected_batch_size_for_model_fn[_PREDICT] = predict_batch_size
expected_batch_size_for_input_fn[_PREDICT] = predict_batch_size
expected_called_count_for_input_fn[_PREDICT] = 1
else:
expected_batch_size_for_model_fn[_PREDICT] = (
predict_batch_size // num_shards)
expected_batch_size_for_input_fn[_PREDICT] = predict_batch_size
expected_called_count_for_input_fn[_PREDICT] = 1
return (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
expected_called_count_for_input_fn)
def _wrap_input_fn_with_batch_size(self, batch_size, input_fn):
def _input_fn(params):
self.assertNotIn('batch_size', params)
params['batch_size'] = batch_size
return input_fn(params)
return _input_fn
def _make_input_fn(self, mode, repeat=False, take=None):
metadata = self._recorded_input_fn_invoke_metadata[mode]
def _input_fn(params):
metadata['called_count'] += 1
batch_size = params['batch_size']
if metadata['batch_size'] is None:
metadata['batch_size'] = batch_size
else:
self.assertEqual(batch_size, metadata['batch_size'])
dataset1 = tf.data.Dataset.from_tensor_slices(self._data)
dataset2 = tf.data.Dataset.from_tensor_slices(self._data)
dataset = tf.data.Dataset.zip((dataset1, dataset2))
if repeat:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
if take:
dataset = dataset.take(take)
def _map_fn(x, y):
x.set_shape([batch_size, 1])
y.set_shape([batch_size, 1])
return {'x': x}, y
dataset = dataset.map(_map_fn)
return dataset
return _input_fn
def _make_model_fn(self, batch_size_dict, use_tpu_estimator_spec=False):
def _create_estimator_spec(mode, loss=None, predictions=None,
export_outputs=None, eval_metrics=None,
train_op=None):
if use_tpu_estimator_spec:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions,
export_outputs=export_outputs,
eval_metrics=eval_metrics)
else:
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions,
export_outputs=export_outputs,
eval_metric_ops=(eval_metrics[0](*eval_metrics[1]) if eval_metrics
else None))
def _model_fn(features, labels, mode, params):
if not self._export_mode:
# Always check batch size in params
self.assertEqual(batch_size_dict[mode], params['batch_size'])
else:
self.assertNotIn('batch_size', params)
# Check the input feeds correct shape for train and eval. When eval on CPU
# or predict, it is allowed to have dynamic shape. So, here only validates
# the fully known shape (which covers the TPU train).
if features['x'].shape.is_fully_defined():
self.assertEqual(batch_size_dict[mode], features['x'].shape[0])
predictions = tf.layers.dense(
features['x'], 1,
kernel_initializer=tf.ones_initializer())
export_outputs = {
'predictions': export_output.RegressionOutput(predictions)
}
if mode == _PREDICT:
return _create_estimator_spec(
mode=mode,
predictions={'predictions': predictions},
export_outputs=export_outputs)
loss = tf.losses.mean_squared_error(labels, predictions)
optimizer = tf.tpu.CrossShardOptimizer(
tf.train.GradientDescentOptimizer(learning_rate=0.5))
train_op = optimizer.minimize(loss,
global_step=tf.train.get_global_step())
eval_metrics = (
lambda labels, predictions: { # pylint: disable=g-long-lambda
'absolute_error': tf.metrics.mean_absolute_error(
labels, predictions)},
[labels, predictions])
return _create_estimator_spec(
mode=mode,
loss=loss,
predictions={'predictions': predictions},
export_outputs=export_outputs,
train_op=train_op,
eval_metrics=eval_metrics)
return _model_fn
def _test_identity_savedmodel(self, export_dir):
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph) as sess:
metagraph_def = tf.saved_model.loader.load(sess, [tf.saved_model.SERVING], export_dir)
fetch = metagraph_def.signature_def['predictions'].outputs['outputs']
feed = metagraph_def.signature_def['predictions'].inputs['inputs']
for x in self._data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(
value=np.ravel(x)))
})).SerializeToString()
y = sess.run(fetch.name, feed_dict={feed.name: [example]})
self.assertAlmostEqual(y, x[0], delta=0.01)
def test_complete_flow_with_per_core_input(self):
# Choose the train_batch_size divisible by 2 and 8 (common shards in test
# env) and batch_size for eval and predict prime number.
train_batch_size = 16
eval_batch_size = 16
predict_batch_size = 8
run_config = create_run_config(iterations_per_loop=4,
per_host_input_for_training=False)
num_shards = run_config.tpu_config.num_shards
(expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
expected_called_count_for_input_fn) = (
self._generate_expected_batch_size_and_called_count(
num_shards,
train_batch_size,
eval_batch_size,
predict_batch_size,
train_sharding_policy=_PER_SHARD,
eval_sharding_policy=_PER_HOST,
predict_sharding_policy=_PER_HOST))
est = tpu_estimator.TPUEstimator(
model_fn=self._make_model_fn(
expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
config=run_config,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
predict_batch_size=predict_batch_size)
# TRAIN
# learn y = x
# Note: Gradients are all zero. Just testing execution.
def _input_fn(params):
dataset = self._make_input_fn(mode=_TRAIN, repeat=True)(params)
return tf.data.make_one_shot_iterator(dataset).get_next()
train_input_fn = _input_fn
est.train(train_input_fn, steps=7)
# EVALUTE
scores = est.evaluate(self._make_input_fn(mode=_EVAL), steps=6)
self.assertEqual(7, scores['global_step'])
self.assertGreater(0.1, scores['absolute_error'])
# PREDICT
predict_input_fn = self._make_input_fn(mode=_PREDICT, take=2)
predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
self.assertAllClose(
self._data[:predict_batch_size * 2], predictions, atol=0.01)
# Verify all input_fn invoke recorded metadata.
self.assertInputFnCalledCountAndBatch(
expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
# EXPORT
feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
serving_input_receiver_fn = (
export.build_parsing_serving_input_receiver_fn(feature_spec))
with self.export_mode():
export_dir = est.export_saved_model(
tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
self.assertTrue(tf.gfile.Exists(export_dir))
self._test_identity_savedmodel(export_dir)
def test_complete_flow_with_per_host_input(self):
  """End-to-end train/eval/predict/export with per-host input sharding."""
  # Choose batch sizes divisible by 2 and 8 (common shard counts in the test
  # env) so the per-host split divides evenly across shards.
  train_batch_size = 16
  eval_batch_size = 16
  predict_batch_size = 16
  run_config = create_run_config(
      iterations_per_loop=4, per_host_input_for_training=True)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards,
           train_batch_size,
           eval_batch_size,
           predict_batch_size,
           train_sharding_policy=_PER_HOST,
           eval_sharding_policy=_PER_HOST,
           predict_sharding_policy=_PER_HOST))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
  est.train(train_input_fn, steps=7)
  # EVALUATE
  scores = est.evaluate(self._make_input_fn(mode=_EVAL), steps=6)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT, take=2)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(
      self._data[:predict_batch_size * 2], predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
def test_complete_flow_with_eval_on_tpu(self):
  """End-to-end flow where evaluation also runs sharded on the TPU."""
  # Choose batch sizes divisible by 2 and 8 (common shard counts in the test
  # env) so the per-host split divides evenly across shards.
  train_batch_size = 16
  eval_batch_size = 8
  predict_batch_size = 8
  run_config = create_run_config(iterations_per_loop=4)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards,
           train_batch_size,
           eval_batch_size,
           predict_batch_size,
           train_sharding_policy=_PER_HOST,
           eval_sharding_policy=_PER_HOST,
           predict_sharding_policy=_PER_HOST))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
  est.train(train_input_fn, steps=7)
  # EVALUATE
  eval_input_fn = self._make_input_fn(mode=_EVAL, repeat=False)
  scores = est.evaluate(eval_input_fn, steps=2)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT, take=2)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(
      self._data[:predict_batch_size * 2], predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
def test_complete_flow_with_no_tpu(self):
  """End-to-end flow with use_tpu=False (everything runs on CPU)."""
  # Train batch size divisible by 2 and 8 (common shard counts in the test
  # env); eval/predict run unsharded on CPU so any size works.
  train_batch_size = 16
  eval_batch_size = 8
  predict_batch_size = 1
  run_config = create_run_config(iterations_per_loop=4)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards, train_batch_size, eval_batch_size, predict_batch_size,
           train_sharding_policy=_UNSHARDED,
           eval_sharding_policy=_UNSHARDED))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size,
      use_tpu=False)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
  est.train(train_input_fn, steps=7)
  # EVALUATE
  eval_input_fn = self._make_input_fn(mode=_EVAL)
  scores = est.evaluate(eval_input_fn, steps=2)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(self._data, predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
if __name__ == '__main__':
  # These tests target the TF1 graph-mode Estimator APIs.
  tf.disable_v2_behavior()
  tf.test.main()
|
<filename>tensorflow_estimator/python/estimator/tpu/tpu_estimator_integration_test.py<gh_stars>0
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPUEstimator."""
import contextlib
import tempfile
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_estimator
# pylint: enable=g-direct-tensorflow-import
flags.DEFINE_integer('test_num_shards', 8, 'number of replicas to test')
FLAGS = flags.FLAGS
_TRAIN = model_fn_lib.ModeKeys.TRAIN
_EVAL = model_fn_lib.ModeKeys.EVAL
_PREDICT = model_fn_lib.ModeKeys.PREDICT
_PER_HOST = 'per_host_sharding'
_PER_SHARD = 'per_shard_sharding'
_UNSHARDED = 'unsharded'
_INPUT_PIPELINE_WITH_QUEUE_RUNNER = (
'Input pipeline contains one or more QueueRunners')
def dense_computation(features):
  """Apply a single dense unit (zero-initialized weights) to feature 'x'."""
  zero_init = tf.zeros_initializer()
  return tf.layers.dense(features['x'], 1, kernel_initializer=zero_init)
def model_fn_global_step_incrementer(features, labels, mode, params):
  """Minimal TPU model_fn: a zero-initialized dense regression.

  Args:
    features: dict with key 'x' holding the input tensor.
    labels: target tensor; unused in predict mode.
    mode: an estimator ModeKeys value.
    params: unused (TPUEstimator-injected parameters).

  Returns:
    A TPUEstimatorSpec with loss/train_op for train and eval, plus
    predictions and a PredictOutput export signature.
  """
  del params
  loss = None
  train_op = None
  predictions = dense_computation(features)
  if mode != _PREDICT:
    loss = tf.losses.mean_squared_error(labels, predictions)
    # CrossShardOptimizer aggregates gradients across TPU replicas.
    optimizer = tf.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate=0.5))
    train_op = optimizer.minimize(loss, tf.train.get_global_step())
  return tpu_estimator.TPUEstimatorSpec(
      mode,
      loss=loss,
      train_op=train_op,
      predictions={'predictions': predictions},
      export_outputs={
          'test': export_output.PredictOutput({
              'prediction': predictions
          })
      })
def dummy_input_fn_with_dataset(batch_size, repeat=True, x=None):
  """Build a ({'x': features}, labels) dataset with constant 2.0 targets.

  Args:
    batch_size: batch size; also the number of synthetic rows generated.
    repeat: if True, repeat the dataset indefinitely.
    x: optional feature array; random normal values when omitted.
  """
  features = x
  if features is None:
    features = np.random.normal(size=[batch_size, 1]).astype(np.float32)
  targets = [[2.0]] * batch_size
  ds = tf.data.Dataset.zip(
      (tf.data.Dataset.from_tensor_slices(features),
       tf.data.Dataset.from_tensor_slices(targets)))
  if repeat:
    ds = ds.repeat()
  ds = ds.batch(batch_size, drop_remainder=True)
  return ds.map(lambda feat, lab: ({'x': feat}, lab))
def dummy_input_fn(batch_size, repeat=True):
  """Return one (features, labels) tensor pair from the dummy dataset.

  Args:
    batch_size: batch size of the underlying synthetic dataset.
    repeat: if True, the dataset repeats indefinitely.
  """
  dataset = dummy_input_fn_with_dataset(batch_size, repeat)
  # Use the functional constructor for consistency with the rest of this
  # file; the Dataset.make_one_shot_iterator() method is the deprecated TF1
  # spelling of the same operation.
  iterator = tf.data.make_one_shot_iterator(dataset)
  return iterator.get_next()
def create_run_config(iterations_per_loop, **kwargs):
  """Build a RunConfig with a local master and the test's shard count."""
  tpu_cfg = tpu_config.TPUConfig(
      iterations_per_loop=iterations_per_loop,
      num_shards=FLAGS.test_num_shards,
      **kwargs)
  return tpu_config.RunConfig(master='', tpu_config=tpu_cfg)
class TPUEstimatorIntegrationTest(tf.test.TestCase):
def setUp(self):
  """Reset the per-test bookkeeping used to verify input_fn invocations."""
  # For each mode, record how many times the input_fn ran and which batch
  # size it observed; asserted later via assertInputFnCalledCountAndBatch.
  self._recorded_input_fn_invoke_metadata = {
      _TRAIN: {'called_count': 0, 'batch_size': None},
      _EVAL: {'called_count': 0, 'batch_size': None},
      _PREDICT: {'called_count': 0, 'batch_size': None}
  }
  # 100 evenly spaced points in [0, 1], shaped (100, 1).
  self._data = np.linspace(0., 1., 100, dtype=np.float32).reshape(-1, 1)
  # Toggled by export_mode(); model_fn skips batch-size checks while True.
  self._export_mode = False
@contextlib.contextmanager
def export_mode(self):
  """Enable the export mode for model_fn.

  Inside the model_fn, the test checks the batch size passed via params.
  However, export mode should not have that. It is infeasible for model_fn
  to distinguish the predict vs export mode today, so this contextmanager
  lets the model_fn tell them apart.
  """
  self._export_mode = True
  try:
    yield
  finally:
    # Reset even when the body raises; otherwise a failing export would
    # leave export mode enabled for the remainder of the test.
    self._export_mode = False
def assertInputFnCalledCountAndBatch(self, expected_called_count,
                                     expected_batch_size):
  """Check recorded input_fn call counts and batch sizes, per mode."""
  metadata = self._recorded_input_fn_invoke_metadata
  actual_counts = {}
  actual_batches = {}
  for mode, record in metadata.items():
    actual_counts[mode] = record['called_count']
    actual_batches[mode] = record['batch_size']
  self.assertEqual(expected_called_count, actual_counts)
  self.assertEqual(expected_batch_size, actual_batches)
def _generate_expected_batch_size_and_called_count(
    self,
    num_shards,
    train_batch_size,
    eval_batch_size,
    predict_batch_size,
    train_sharding_policy=_UNSHARDED,
    eval_sharding_policy=_UNSHARDED,
    predict_sharding_policy=None):
  """Derive expected per-mode batch sizes and input_fn call counts.

  Args:
    num_shards: number of TPU shards in the run config.
    train_batch_size: global train batch size.
    eval_batch_size: global eval batch size.
    predict_batch_size: global predict batch size.
    train_sharding_policy: _PER_SHARD, _PER_HOST or _UNSHARDED.
    eval_sharding_policy: _PER_HOST or _UNSHARDED.
    predict_sharding_policy: _PER_HOST, or None for predict on CPU.

  Returns:
    A 3-tuple of dicts keyed by mode: (batch size seen by model_fn,
    batch size seen by input_fn, number of input_fn invocations).
  """
  expected_batch_size_for_model_fn = {}
  expected_batch_size_for_input_fn = {}
  expected_called_count_for_input_fn = {}
  if train_sharding_policy == _PER_SHARD:
    # Per-shard input: every shard invokes its own input_fn with the
    # per-shard slice of the global batch.
    self.assertEqual(0, train_batch_size % num_shards)
    expected_batch_size_for_model_fn[_TRAIN] = train_batch_size // num_shards
    expected_batch_size_for_input_fn[_TRAIN] = train_batch_size // num_shards
    expected_called_count_for_input_fn[_TRAIN] = num_shards
  elif train_sharding_policy == _PER_HOST:
    # Per-host input: one input_fn call with the global batch; the model_fn
    # still sees the per-shard slice.
    self.assertEqual(0, train_batch_size % num_shards)
    expected_batch_size_for_model_fn[_TRAIN] = train_batch_size // num_shards
    expected_batch_size_for_input_fn[_TRAIN] = train_batch_size
    expected_called_count_for_input_fn[_TRAIN] = 1
  else:
    expected_batch_size_for_model_fn[_TRAIN] = train_batch_size
    expected_batch_size_for_input_fn[_TRAIN] = train_batch_size
    expected_called_count_for_input_fn[_TRAIN] = 1
  if eval_sharding_policy == _PER_HOST:
    # Bug fix: this previously checked train_batch_size; the divisibility
    # requirement here is on the *eval* batch size.
    self.assertEqual(0, eval_batch_size % num_shards)
    expected_batch_size_for_model_fn[_EVAL] = eval_batch_size // num_shards
    expected_batch_size_for_input_fn[_EVAL] = eval_batch_size
    expected_called_count_for_input_fn[_EVAL] = 1
  else:
    expected_batch_size_for_model_fn[_EVAL] = eval_batch_size
    expected_batch_size_for_input_fn[_EVAL] = eval_batch_size
    expected_called_count_for_input_fn[_EVAL] = 1
  if predict_sharding_policy is None:
    # On CPU.
    expected_batch_size_for_model_fn[_PREDICT] = predict_batch_size
    expected_batch_size_for_input_fn[_PREDICT] = predict_batch_size
    expected_called_count_for_input_fn[_PREDICT] = 1
  else:
    expected_batch_size_for_model_fn[_PREDICT] = (
        predict_batch_size // num_shards)
    expected_batch_size_for_input_fn[_PREDICT] = predict_batch_size
    expected_called_count_for_input_fn[_PREDICT] = 1
  return (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
          expected_called_count_for_input_fn)
def _wrap_input_fn_with_batch_size(self, batch_size, input_fn):
  """Wrap input_fn so that the given batch_size is injected into params."""
  def _wrapped(params):
    # TPUEstimator normally owns 'batch_size'; ensure nobody set it first.
    self.assertNotIn('batch_size', params)
    params['batch_size'] = batch_size
    return input_fn(params)
  return _wrapped
def _make_input_fn(self, mode, repeat=False, take=None):
  """Return an input_fn over self._data that records its invocations.

  Args:
    mode: _TRAIN, _EVAL or _PREDICT; selects the metadata slot to update.
    repeat: if True, the dataset repeats indefinitely.
    take: if set, keep only the first `take` batches.

  Returns:
    An input_fn(params) producing a ({'x': x}, y) dataset batched to
    params['batch_size'], where x and y are both self._data.
  """
  metadata = self._recorded_input_fn_invoke_metadata[mode]
  def _input_fn(params):
    metadata['called_count'] += 1
    batch_size = params['batch_size']
    if metadata['batch_size'] is None:
      metadata['batch_size'] = batch_size
    else:
      # Every invocation for a given mode must observe the same batch size.
      self.assertEqual(batch_size, metadata['batch_size'])
    dataset1 = tf.data.Dataset.from_tensor_slices(self._data)
    dataset2 = tf.data.Dataset.from_tensor_slices(self._data)
    dataset = tf.data.Dataset.zip((dataset1, dataset2))
    if repeat:
      dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    if take:
      dataset = dataset.take(take)
    def _map_fn(x, y):
      # Pin the static batch dimension; TPU graphs need fully-known shapes.
      x.set_shape([batch_size, 1])
      y.set_shape([batch_size, 1])
      return {'x': x}, y
    dataset = dataset.map(_map_fn)
    return dataset
  return _input_fn
def _make_model_fn(self, batch_size_dict, use_tpu_estimator_spec=False):
  """Build a model_fn (y = x via ones-initialized dense) with batch checks.

  Args:
    batch_size_dict: mode -> batch size that the model_fn should observe.
    use_tpu_estimator_spec: if True return TPUEstimatorSpec, else the plain
      EstimatorSpec.

  Returns:
    A model_fn that asserts the injected params['batch_size'] (except in
    export mode) and the static batch dimension of the features.
  """
  def _create_estimator_spec(mode, loss=None, predictions=None,
                             export_outputs=None, eval_metrics=None,
                             train_op=None):
    # Bridge between the two spec flavors: TPUEstimatorSpec takes
    # eval_metrics as a (metric_fn, args) pair, while EstimatorSpec wants
    # the already-constructed eval_metric_ops dict.
    if use_tpu_estimator_spec:
      return tpu_estimator.TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          train_op=train_op,
          predictions=predictions,
          export_outputs=export_outputs,
          eval_metrics=eval_metrics)
    else:
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=loss,
          train_op=train_op,
          predictions=predictions,
          export_outputs=export_outputs,
          eval_metric_ops=(eval_metrics[0](*eval_metrics[1]) if eval_metrics
                           else None))
  def _model_fn(features, labels, mode, params):
    if not self._export_mode:
      # Always check batch size in params.
      self.assertEqual(batch_size_dict[mode], params['batch_size'])
    else:
      # Export (serving) mode must not receive a batch size.
      self.assertNotIn('batch_size', params)
    # Check the input feeds correct shape for train and eval. When eval on CPU
    # or predict, it is allowed to have dynamic shape. So, here only validates
    # the fully known shape (which covers the TPU train).
    if features['x'].shape.is_fully_defined():
      self.assertEqual(batch_size_dict[mode], features['x'].shape[0])
    predictions = tf.layers.dense(
        features['x'], 1,
        kernel_initializer=tf.ones_initializer())
    export_outputs = {
        'predictions': export_output.RegressionOutput(predictions)
    }
    if mode == _PREDICT:
      return _create_estimator_spec(
          mode=mode,
          predictions={'predictions': predictions},
          export_outputs=export_outputs)
    loss = tf.losses.mean_squared_error(labels, predictions)
    optimizer = tf.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate=0.5))
    train_op = optimizer.minimize(loss,
                                  global_step=tf.train.get_global_step())
    eval_metrics = (
        lambda labels, predictions: {  # pylint: disable=g-long-lambda
            'absolute_error': tf.metrics.mean_absolute_error(
                labels, predictions)},
        [labels, predictions])
    return _create_estimator_spec(
        mode=mode,
        loss=loss,
        predictions={'predictions': predictions},
        export_outputs=export_outputs,
        train_op=train_op,
        eval_metrics=eval_metrics)
  return _model_fn
def _test_identity_savedmodel(self, export_dir):
  """Load the exported SavedModel and check it predicts y close to x."""
  with tf.Graph().as_default() as graph:
    with tf.Session(graph=graph) as sess:
      metagraph_def = tf.saved_model.loader.load(
          sess, [tf.saved_model.SERVING], export_dir)
      fetch = metagraph_def.signature_def['predictions'].outputs['outputs']
      feed = metagraph_def.signature_def['predictions'].inputs['inputs']
      for x in self._data:
        # Feed each point as a serialized tf.Example, matching the parsing
        # serving_input_receiver_fn used at export time.
        example = example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    'x':
                        feature_pb2.Feature(
                            float_list=feature_pb2.FloatList(
                                value=np.ravel(x)))
                })).SerializeToString()
        y = sess.run(fetch.name, feed_dict={feed.name: [example]})
        self.assertAlmostEqual(y, x[0], delta=0.01)
def test_complete_flow_with_per_core_input(self):
  """End-to-end flow with per-core (per-shard) training input."""
  # Choose batch sizes divisible by 2 and 8 (common shard counts in the test
  # env) so both per-shard and per-host splits divide evenly.
  train_batch_size = 16
  eval_batch_size = 16
  predict_batch_size = 8
  run_config = create_run_config(iterations_per_loop=4,
                                 per_host_input_for_training=False)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards,
           train_batch_size,
           eval_batch_size,
           predict_batch_size,
           train_sharding_policy=_PER_SHARD,
           eval_sharding_policy=_PER_HOST,
           predict_sharding_policy=_PER_HOST))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  # NOTE: this input_fn returns raw tensors rather than the Dataset used by
  # the other tests.
  def _input_fn(params):
    dataset = self._make_input_fn(mode=_TRAIN, repeat=True)(params)
    return tf.data.make_one_shot_iterator(dataset).get_next()
  train_input_fn = _input_fn
  est.train(train_input_fn, steps=7)
  # EVALUATE
  scores = est.evaluate(self._make_input_fn(mode=_EVAL), steps=6)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT, take=2)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(
      self._data[:predict_batch_size * 2], predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
def test_complete_flow_with_per_host_input(self):
  """End-to-end train/eval/predict/export with per-host input sharding."""
  # Choose batch sizes divisible by 2 and 8 (common shard counts in the test
  # env) so the per-host split divides evenly across shards.
  train_batch_size = 16
  eval_batch_size = 16
  predict_batch_size = 16
  run_config = create_run_config(
      iterations_per_loop=4, per_host_input_for_training=True)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards,
           train_batch_size,
           eval_batch_size,
           predict_batch_size,
           train_sharding_policy=_PER_HOST,
           eval_sharding_policy=_PER_HOST,
           predict_sharding_policy=_PER_HOST))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
  est.train(train_input_fn, steps=7)
  # EVALUATE
  scores = est.evaluate(self._make_input_fn(mode=_EVAL), steps=6)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT, take=2)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(
      self._data[:predict_batch_size * 2], predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
def test_complete_flow_with_eval_on_tpu(self):
  """End-to-end flow where evaluation also runs sharded on the TPU."""
  # Choose batch sizes divisible by 2 and 8 (common shard counts in the test
  # env) so the per-host split divides evenly across shards.
  train_batch_size = 16
  eval_batch_size = 8
  predict_batch_size = 8
  run_config = create_run_config(iterations_per_loop=4)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards,
           train_batch_size,
           eval_batch_size,
           predict_batch_size,
           train_sharding_policy=_PER_HOST,
           eval_sharding_policy=_PER_HOST,
           predict_sharding_policy=_PER_HOST))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
  est.train(train_input_fn, steps=7)
  # EVALUATE
  eval_input_fn = self._make_input_fn(mode=_EVAL, repeat=False)
  scores = est.evaluate(eval_input_fn, steps=2)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT, take=2)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(
      self._data[:predict_batch_size * 2], predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
def test_complete_flow_with_no_tpu(self):
  """End-to-end flow with use_tpu=False (everything runs on CPU)."""
  # Train batch size divisible by 2 and 8 (common shard counts in the test
  # env); eval/predict run unsharded on CPU so any size works.
  train_batch_size = 16
  eval_batch_size = 8
  predict_batch_size = 1
  run_config = create_run_config(iterations_per_loop=4)
  num_shards = run_config.tpu_config.num_shards
  (expected_batch_size_for_model_fn, expected_batch_size_for_input_fn,
   expected_called_count_for_input_fn) = (
       self._generate_expected_batch_size_and_called_count(
           num_shards, train_batch_size, eval_batch_size, predict_batch_size,
           train_sharding_policy=_UNSHARDED,
           eval_sharding_policy=_UNSHARDED))
  est = tpu_estimator.TPUEstimator(
      model_fn=self._make_model_fn(
          expected_batch_size_for_model_fn, use_tpu_estimator_spec=True),
      config=run_config,
      train_batch_size=train_batch_size,
      eval_batch_size=eval_batch_size,
      predict_batch_size=predict_batch_size,
      use_tpu=False)
  # TRAIN: learn y = x. Gradients are all zero; just testing execution.
  train_input_fn = self._make_input_fn(mode=_TRAIN, repeat=True)
  est.train(train_input_fn, steps=7)
  # EVALUATE
  eval_input_fn = self._make_input_fn(mode=_EVAL)
  scores = est.evaluate(eval_input_fn, steps=2)
  self.assertEqual(7, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])
  # PREDICT
  predict_input_fn = self._make_input_fn(mode=_PREDICT)
  predictions = [x['predictions'] for x in est.predict(predict_input_fn)]
  self.assertAllClose(self._data, predictions, atol=0.01)
  # Verify all input_fn invocations recorded the expected metadata.
  self.assertInputFnCalledCountAndBatch(
      expected_called_count_for_input_fn, expected_batch_size_for_input_fn)
  # EXPORT
  feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
  serving_input_receiver_fn = (
      export.build_parsing_serving_input_receiver_fn(feature_spec))
  with self.export_mode():
    export_dir = est.export_saved_model(
        tempfile.mkdtemp(dir=self.get_temp_dir()), serving_input_receiver_fn)
  self.assertTrue(tf.gfile.Exists(export_dir))
  self._test_identity_savedmodel(export_dir)
if __name__ == '__main__':
  # These tests target the TF1 graph-mode Estimator APIs.
  tf.disable_v2_behavior()
  tf.test.main()
|
en
| 0.805318
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for TPUEstimator. # pylint: disable=g-direct-tensorflow-import # pylint: enable=g-direct-tensorflow-import Enable the export mode for model_fn. # Inside the model_fn, the test will check the batch size passed via params. # However, export mode should not have that. It is infeasible for model_fn # to distinguish the predict vs export mode today. So, this contextmanager # helps the model_fn to do that. # On CPU. # Always check batch size in params # Check the input feeds correct shape for train and eval. When eval on CPU # or predict, it is allowed to have dynamic shape. So, here only validates # the fully known shape (which covers the TPU train). # pylint: disable=g-long-lambda # Choose the train_batch_size divisible by 2 and 8 (common shards in test # env) and batch_size for eval and predict prime number. # TRAIN # learn y = x # Note: Gradients are all zero. Just testing execution. # EVALUTE # PREDICT # Verify all input_fn invoke recorded metadata. # EXPORT # Choose the train_batch_size divisible by 2 and 8 (common shards in test # env) and batch_size for eval and predict prime number. # TRAIN # learn y = x # Note: Gradients are all zero. Just testing execution. # EVALUTE # PREDICT # Verify all input_fn invoke recorded metadata. 
# EXPORT # Choose the train_batch_size divisible by 2 and 8 (common shards in test # env) and batch_size for eval and predict prime number. # TRAIN # learn y = x # Note: Gradients are all zero. Just testing execution. # EVALUTE # PREDICT # Verify all input_fn invoke recorded metadata. # EXPORT # Choose the train_batch_size divisible by 2 and 8 (common shards in test # env) and batch_size for eval and predict prime number. # TRAIN # learn y = x # Note: Gradients are all zero. Just testing execution. # EVALUTE # PREDICT # Verify all input_fn invoke recorded metadata. # EXPORT
| 1.678806
| 2
|
src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/_validators.py
|
jfcoz/azure-cli
| 0
|
6625921
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
from knack.log import get_logger
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.validators import validate_key_value_pairs
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.storage._client_factory import get_storage_data_service_client
from azure.cli.command_modules.storage.util import glob_files_locally, guess_content_type
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
from azure.cli.command_modules.storage.url_quote_util import encode_for_url
from azure.cli.command_modules.storage.oauth_token_util import TokenUpdater
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
# Utilities
# pylint: disable=inconsistent-return-statements,too-many-lines
def _query_account_key(cli_ctx, account_name):
    """Look up a storage account key when only the account name was given."""
    resource_group, client = _query_account_rg(cli_ctx, account_name)
    legacy_keys_type = get_sdk(
        cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
    keys = client.storage_accounts.list_keys(resource_group, account_name)
    if legacy_keys_type:
        # Older API profiles expose the keys as named attributes (key1/key2).
        return keys.key1
    # Newer profiles return StorageAccountListKeysResult with a list of keys.
    return keys.keys[0].value  # pylint: disable=no-member
def _query_account_rg(cli_ctx, account_name):
    """Find the resource group of a storage account (the mgmt SDK needs it).

    Returns:
        A (resource_group_name, storage management client) tuple.

    Raises:
        ValueError: when no visible account has the given name.
    """
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
    for account in client.storage_accounts.list():
        if account.name == account_name:
            from msrestazure.tools import parse_resource_id
            return parse_resource_id(account.id)['resource_group'], client
    raise ValueError("Storage account '{}' not found.".format(account_name))
def _create_token_credential(cli_ctx):
    """Create an AAD TokenCredential kept current by a TokenUpdater.

    The updater is cancelled when the CLI finishes executing so it does not
    outlive the command (presumably it runs a refresh timer — see
    TokenUpdater).
    """
    from knack.cli import EVENT_CLI_POST_EXECUTE
    TokenCredential = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#TokenCredential')
    token_credential = TokenCredential()
    updater = TokenUpdater(token_credential, cli_ctx)
    # Stop the updater once the command has completed.
    def _cancel_timer_event_handler(_, **__):
        updater.cancel()
    cli_ctx.register_event(EVENT_CLI_POST_EXECUTE, _cancel_timer_event_handler)
    return token_credential
# region PARAMETER VALIDATORS
def process_resource_group(cmd, namespace):
    """Derive the resource group from the account name when not supplied."""
    if namespace.account_name and not namespace.resource_group_name:
        resource_group, _ = _query_account_rg(cmd.cli_ctx, namespace.account_name)
        namespace.resource_group_name = resource_group
def validate_table_payload_format(cmd, namespace):
    """Translate the --accept choice into a TablePayloadFormat constant."""
    t_table_payload = get_table_data_type(cmd.cli_ctx, 'table', 'TablePayloadFormat')
    if namespace.accept:
        payload_formats = {
            'none': t_table_payload.JSON_NO_METADATA,
            'minimal': t_table_payload.JSON_MINIMAL_METADATA,
            'full': t_table_payload.JSON_FULL_METADATA,
        }
        namespace.accept = payload_formats[namespace.accept.lower()]
def validate_bypass(namespace):
    """Normalize --bypass: join a list of services into 'a, b' form."""
    bypass = namespace.bypass
    if bypass and isinstance(bypass, list):
        namespace.bypass = ', '.join(bypass)
def validate_client_parameters(cmd, namespace):
    """Retrieve storage connection parameters for the command.

    Resolution order: explicit arguments, then CLI config / environment
    variables. A connection string (if any) is parsed into account name, key
    and SAS token. Finally, if only the account name is known, the account
    key is queried from the management plane.
    """
    n = namespace

    def get_config_value(section, key, default):
        return cmd.cli_ctx.config.get(section, key, default)

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value('storage', 'account', None)
        if auth_mode == 'login':
            # AAD auth: use a token credential instead of keys/SAS.
            n.token_credential = _create_token_credential(cmd.cli_ctx)
            # Warn about any account-key style arguments that will be ignored.
            account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
                                n.connection_string and "--connection-string"]
            account_key_args = [arg for arg in account_key_args if arg]
            if account_key_args:
                logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                               ' ,'.join(account_key_args))
            return

    if not n.connection_string:
        n.connection_string = get_config_value('storage', 'connection_string', None)

    # If a connection string was supplied (argument or config/env), extract
    # the account name, key and SAS token from it.
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        n.sas_token = conn_dict.get('SharedAccessSignature')

    # Otherwise, try to retrieve the remaining values from config/env vars.
    if not n.account_name:
        n.account_name = get_config_value('storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value('storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value('storage', 'sas_token', None)

    # Strip the leading '?' from the SAS token; the portal and the command
    # line return the SAS token in different forms.
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # If the account name is specified but neither key nor SAS token is,
    # attempt to query the key from the management plane.
    if n.account_name and not n.account_key and not n.sas_token:
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
def process_blob_source_uri(cmd, namespace):
    """
    Validate the parameters referenced to a blob source and create the source URI from them.

    On success namespace.copy_source holds a full https blob URI (with SAS/snapshot
    query parameters where applicable) and the individual source_* args are consumed.
    """
    from .util import create_short_lived_blob_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '

    ns = vars(namespace)

    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)

    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    sas = ns.pop('source_sas', None)

    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        else:
            # simplest scenario--no further processing necessary
            return

    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account

    # determine if the copy will happen in the same storage account
    if not source_account_name and source_account_key:
        raise ValueError(usage_string.format('Source account key is given but account name is not'))
    elif not source_account_name and not source_account_key:
        # neither source account name or key is given, assume that user intends to copy blob in
        # the same account
        source_account_name = ns.get('account_name', None)
        source_account_key = ns.get('account_key', None)
    elif source_account_name and not source_account_key:
        if source_account_name == ns.get('account_name', None):
            # the source account name is same as the destination account name
            source_account_key = ns.get('account_key', None)
        else:
            # the source account is different from destination account but the key is missing
            # try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # else: both source account name and key are given by user

    if not source_account_name:
        raise ValueError(usage_string.format('Storage account name not found'))

    if not sas:
        # mint a short-lived SAS so the copy service can read the source blob
        sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)

    query_params = []
    if sas:
        query_params.append(sas)
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))

    uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
                                                cmd.cli_ctx.cloud.suffixes.storage_endpoint,
                                                container,
                                                blob,
                                                '?' if query_params else '',
                                                '&'.join(query_params))

    namespace.copy_source = uri
def validate_source_uri(cmd, namespace):  # pylint: disable=too-many-statements
    """Build namespace.copy_source from either a source URI or blob/file arguments.

    Resolves source credentials (account key or SAS), generating a short-lived SAS
    when the source is in another account or of a different service kind, and
    consumes the individual source_* arguments.
    """
    from .util import create_short_lived_blob_sas, create_short_lived_file_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri [--source-sas]' \
        '\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
        '\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
        '\n\tOR --source-share --source-path' \
        '\n\tOR --source-share --source-path [--source-account-name & sas]' \
        '\n\tOR --source-share --source-path [--source-account-name & key]'

    ns = vars(namespace)

    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)

    # source as file
    share = ns.pop('source_share', None)
    path = ns.pop('source_path', None)

    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    source_sas = ns.pop('source_sas', None)

    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, snapshot, share, path, source_account_name,
                source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        if source_sas:
            # append the caller-supplied SAS onto the URI
            source_sas = source_sas.lstrip('?')
            uri = '{}{}{}'.format(uri, '?', source_sas)

        namespace.copy_source = uri
        return

    # ensure either a file or blob source is specified
    valid_blob_source = container and blob and not share and not path
    valid_file_source = share and path and not container and not blob and not snapshot

    if not valid_blob_source and not valid_file_source:
        raise ValueError(usage_string.format('Neither a valid blob or file source is specified'))
    elif valid_blob_source and valid_file_source:
        raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
                                             'specified'))

    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account

    if not source_account_name:
        if source_account_key:
            raise ValueError(usage_string.format('Source account key is given but account name is not'))
        # assume that user intends to copy blob in the same account
        source_account_name = ns.get('account_name', None)

    # determine if the copy will happen in the same storage account
    same_account = False

    if not source_account_key and not source_sas:
        if source_account_name == ns.get('account_name', None):
            same_account = True
            source_account_key = ns.get('account_key', None)
            source_sas = ns.get('sas_token', None)
        else:
            # the source account is different from destination account but the key is missing try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))

    # Both source account name and either key or sas (or both) are now available
    if not source_sas:
        # generate a sas token even in the same account when the source and destination are not the same kind.
        if valid_file_source and (ns.get('container_name', None) or not same_account):
            import os
            dir_name, file_name = os.path.split(path) if path else (None, '')
            source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
                                                     dir_name, file_name)
        elif valid_blob_source and (ns.get('share_name', None) or not same_account):
            source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)

    query_params = []
    if source_sas:
        query_params.append(source_sas.lstrip('?'))
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))

    # assemble the final source URI for either the blob or the file endpoint
    uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
        source_account_name,
        'blob' if valid_blob_source else 'file',
        container if valid_blob_source else share,
        encode_for_url(blob if valid_blob_source else path),
        '?' if query_params else '',
        '&'.join(query_params),
        cmd.cli_ctx.cloud.suffixes.storage_endpoint)

    namespace.copy_source = uri
def validate_blob_type(namespace):
    """Default the blob type from the upload source: .vhd files become page blobs."""
    if namespace.blob_type:
        return
    is_vhd = namespace.file_path.endswith('.vhd')
    namespace.blob_type = 'page' if is_vhd else 'block'
def validate_storage_data_plane_list(namespace):
    """Translate the '*' sentinel for --num-results into 'no limit' (None)."""
    raw = namespace.num_results
    namespace.num_results = None if raw == '*' else int(raw)
def get_content_setting_validator(settings_class, update, guess_from_file=None):
    """Create a validator that assembles a ContentSettings object on the namespace.

    When update=True the existing blob/file properties are fetched first so that
    unspecified values are preserved; otherwise the content type may optionally be
    guessed from the source file named by guess_from_file.
    """
    def _class_name(class_type):
        # compare classes by qualified name because blob and file ContentSettings
        # are distinct types with the same short name
        return class_type.__module__ + "." + class_type.__class__.__name__

    def validator(cmd, namespace):
        t_base_blob_service, t_file_service, t_blob_content_settings, t_file_content_settings = cmd.get_models(
            'blob.baseblobservice#BaseBlobService',
            'file#FileService',
            'blob.models#ContentSettings',
            'file.models#ContentSettings')

        # must run certain validators first for an update
        if update:
            validate_client_parameters(cmd, namespace)
        if update and _class_name(settings_class) == _class_name(t_file_content_settings):
            get_file_path_validator()(namespace)
        ns = vars(namespace)

        # retrieve the existing object properties for an update
        if update:
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            if _class_name(settings_class) == _class_name(t_blob_content_settings):
                client = get_storage_data_service_client(cmd.cli_ctx,
                                                         t_base_blob_service,
                                                         account,
                                                         key,
                                                         cs,
                                                         sas)
                container = ns.get('container_name')
                blob = ns.get('blob_name')
                lease_id = ns.get('lease_id')
                props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings
            elif _class_name(settings_class) == _class_name(t_file_content_settings):
                client = get_storage_data_service_client(cmd.cli_ctx, t_file_service, account, key, cs, sas)
                share = ns.get('share_name')
                directory = ns.get('directory_name')
                filename = ns.get('file_name')
                props = client.get_file_properties(share, directory, filename).properties.content_settings

        # create new properties
        new_props = settings_class(
            content_type=ns.pop('content_type', None),
            content_disposition=ns.pop('content_disposition', None),
            content_encoding=ns.pop('content_encoding', None),
            content_language=ns.pop('content_language', None),
            content_md5=ns.pop('content_md5', None),
            cache_control=ns.pop('content_cache_control', None)
        )

        # if update, fill in any None values with existing
        if update:
            for attr in ['content_type', 'content_disposition', 'content_encoding', 'content_language', 'content_md5',
                         'cache_control']:
                if getattr(new_props, attr) is None:
                    setattr(new_props, attr, getattr(props, attr))
        else:
            if guess_from_file:
                # e.g. infer content_type from the upload source's file extension
                new_props = guess_content_type(ns[guess_from_file], new_props, settings_class)

        ns['content_settings'] = new_props

    return validator
def validate_custom_domain(namespace):
    """--use-subdomain is only meaningful together with --custom-domain."""
    if not namespace.use_subdomain:
        return
    if not namespace.custom_domain:
        raise ValueError('usage error: --custom-domain DOMAIN [--use-subdomain]')
def validate_encryption_services(cmd, namespace):
    """
    Builds up the encryption services object for storage account operations based on the list of services passed in.
    """
    if namespace.encryption_services:
        t_encryption_services, t_encryption_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
                                                              'EncryptionServices', 'EncryptionService', mod='models')
        # every service the user listed is switched on; unspecified services keep their defaults
        services = {service: t_encryption_service(enabled=True) for service in namespace.encryption_services}

        namespace.encryption_services = t_encryption_services(**services)
def validate_encryption_source(cmd, namespace):
    """Validate the Key Vault argument combination and build KeyVaultProperties.

    All three key parameters (name, version, vault URI) must be supplied if and only
    if --encryption-key-source is Microsoft.Keyvault.
    """
    ns = vars(namespace)

    key_name = ns.pop('encryption_key_name', None)
    key_version = ns.pop('encryption_key_version', None)
    key_vault_uri = ns.pop('encryption_key_vault', None)

    if namespace.encryption_key_source == 'Microsoft.Keyvault' and not (key_name and key_version and key_vault_uri):
        raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are required '
                         'when --encryption-key-source=Microsoft.Keyvault is specified.')

    if key_name or key_version or key_vault_uri:
        if namespace.encryption_key_source != 'Microsoft.Keyvault':
            raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are not '
                             'applicable when --encryption-key-source=Microsoft.Keyvault is not specified.')
        KeyVaultProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE, 'KeyVaultProperties',
                                     mod='models')
        # older API profiles may not expose KeyVaultProperties; nothing more to do then
        if not KeyVaultProperties:
            return

        kv_prop = KeyVaultProperties(key_name=key_name, key_version=key_version, key_vault_uri=key_vault_uri)
        namespace.encryption_key_vault_properties = kv_prop
def validate_entity(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    RowKey and PartitionKey are converted to the correct case and included.

    Raises argparse.ArgumentError when RowKey or PartitionKey is missing; numeric
    values (other than the two key columns) are cast to int/float so that they can
    be queried correctly.
    """
    values = dict(x.split('=', 1) for x in namespace.entity)

    # Normalize the casing of the two mandatory keys. Build a new dict instead of
    # deleting/inserting while iterating the live dict -- mutating a dict during
    # iteration is undefined behaviour and can raise RuntimeError on Python 3.
    normalized = {}
    for key, val in values.items():
        if key.lower() == 'rowkey':
            normalized['RowKey'] = val
        elif key.lower() == 'partitionkey':
            normalized['PartitionKey'] = val
        else:
            normalized[key] = val
    values = normalized

    keys = values.keys()
    missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
    missing_keys = '{}PartitionKey'.format(missing_keys) \
        if 'PartitionKey' not in keys else missing_keys
    if missing_keys:
        import argparse
        raise argparse.ArgumentError(
            None, 'incorrect usage: entity requires: {}'.format(missing_keys))

    def cast_val(key, val):
        """ Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
        can be queried correctly. """
        if key in ['PartitionKey', 'RowKey']:
            return val

        def try_cast(to_type):
            try:
                return to_type(val)
            except ValueError:
                return None
        return try_cast(int) or try_cast(float) or val

    # ensure numbers are converted from strings so querying will work correctly
    values = {key: cast_val(key, val) for key, val in values.items()}
    namespace.entity = values
def validate_marker(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    nextrowkey and nextpartitionkey are included.

    Raises argparse.ArgumentError when either required key is missing. The two
    required keys are lower-cased; all other keys are left untouched.
    """
    if not namespace.marker:
        return
    marker = dict(x.split('=', 1) for x in namespace.marker)
    expected_keys = {'nextrowkey', 'nextpartitionkey'}

    # Build a new dict rather than deleting/inserting while iterating the live
    # dict -- mutating a dict during iteration is undefined behaviour and can
    # raise RuntimeError on Python 3.
    normalized = {}
    for key, val in marker.items():
        new_key = key.lower()
        if new_key in expected_keys:
            expected_keys.remove(new_key)
            normalized[new_key] = val
        else:
            normalized[key] = val

    if expected_keys:
        import argparse
        raise argparse.ArgumentError(
            None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))

    namespace.marker = normalized
def get_file_path_validator(default_file_param=None):
    """ Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
    Allows another path-type parameter to be named which can supply a default filename. """
    def validator(namespace):
        import os
        if not hasattr(namespace, 'path'):
            return
        full_path = namespace.path
        if full_path:
            directory, filename = os.path.split(full_path)
        else:
            directory, filename = None, ''

        # no extension detected: treat the whole path as a directory and take the
        # filename from the fallback parameter
        if default_file_param and '.' not in filename:
            directory = full_path
            filename = os.path.split(getattr(namespace, default_file_param))[1]

        if directory in ('', '.'):
            directory = None

        namespace.directory_name = directory
        namespace.file_name = filename
        del namespace.path
    return validator
def validate_included_datasets(cmd, namespace):
    """Convert the --include flag characters (c/m/s/d) into the SDK blob Include object."""
    if namespace.include:
        include = namespace.include
        if set(include) - set('cmsd'):
            help_string = '(c)opy-info (m)etadata (s)napshots (d)eleted'
            raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
        t_blob_include = cmd.get_models('blob#Include')
        # positional args appear to be: snapshots, metadata, uncommitted (always False),
        # copy info, deleted -- verify against the installed SDK's Include signature
        namespace.include = t_blob_include('s' in include, 'm' in include, False, 'c' in include, 'd' in include)
def validate_key(namespace):
    """Translate the friendly key name into the SDK key name via storage_account_key_options."""
    namespace.key_name = storage_account_key_options[namespace.key_name]
def validate_metadata(namespace):
    """Turn 'key=value' metadata strings into a dict; values may themselves contain '='."""
    pairs = namespace.metadata
    if pairs:
        namespace.metadata = {k: v for k, v in (item.split('=', 1) for item in pairs)}
def get_permission_help_string(permission_class):
    """Render the class's permission attribute names as '(r)ead (w)rite ...' help text."""
    names = [attr.lower() for attr in dir(permission_class) if not attr.startswith('__')]
    return ' '.join('({}){}'.format(name[0], name[1:]) for name in names)
def get_permission_validator(permission_class):
allowed_values = [x.lower() for x in dir(permission_class) if not x.startswith('__')]
allowed_string = ''.join(x[0] for x in allowed_values)
def validator(namespace):
if namespace.permission:
if set(namespace.permission) - set(allowed_string):
help_string = get_permission_help_string(permission_class)
raise ValueError(
'valid values are {} or a combination thereof.'.format(help_string))
namespace.permission = permission_class(_str=namespace.permission)
return validator
def table_permission_validator(cmd, namespace):
    """ A special case for table because the SDK associates the QUERY permission with 'r' """
    t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
    if namespace.permission:
        # only r/a/u/d characters are valid for table SAS permissions
        if set(namespace.permission) - set('raud'):
            help_string = '(r)ead/query (a)dd (u)pdate (d)elete'
            raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
        namespace.permission = t_table_permissions(_str=namespace.permission)
def validate_container_public_access(cmd, namespace):
    """Convert --public-access and, for set-permission, pre-fetch the current ACL."""
    from .sdkutil import get_container_access_type
    t_base_blob_svc = cmd.get_models('blob.baseblobservice#BaseBlobService')

    if namespace.public_access:
        namespace.public_access = get_container_access_type(cmd.cli_ctx, namespace.public_access.lower())

        if hasattr(namespace, 'signed_identifiers'):
            # must retrieve the existing ACL to simulate a patch operation because these calls
            # are needlessly conflated
            ns = vars(namespace)
            validate_client_parameters(cmd, namespace)
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            client = get_storage_data_service_client(cmd.cli_ctx, t_base_blob_svc, account, key, cs, sas)
            container = ns.get('container_name')
            lease_id = ns.get('lease_id')
            ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def validate_select(namespace):
    """Collapse the list of --select columns into a single comma separated string."""
    selected = namespace.select
    if selected:
        namespace.select = ','.join(selected)
# pylint: disable=too-many-statements
def get_source_file_or_blob_service_client(cmd, namespace):
    """
    Create the second file service or blob service client for batch copy command, which is used to
    list the source files or blobs. If both the source account and source URI are omitted, it
    indicates that user want to copy files or blobs in the same storage account, therefore the
    destination client will be set None hence the command will use destination client.
    """
    t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
    usage_string = 'invalid usage: supply only one of the following argument sets:' + \
                   '\n\t --source-uri [--source-sas]' + \
                   '\n\tOR --source-container' + \
                   '\n\tOR --source-container --source-account-name --source-account-key' + \
                   '\n\tOR --source-container --source-account-name --source-sas' + \
                   '\n\tOR --source-share --source-account-name --source-account-key' + \
                   '\n\tOR --source-share --source-account-name --source-account-sas'

    ns = vars(namespace)
    source_account = ns.pop('source_account_name', None)
    source_key = ns.pop('source_account_key', None)
    source_uri = ns.pop('source_uri', None)
    source_sas = ns.get('source_sas', None)
    source_container = ns.get('source_container', None)
    source_share = ns.get('source_share', None)

    if source_uri and source_account:
        raise ValueError(usage_string)

    if not source_uri and bool(source_container) == bool(source_share):  # must be container or share
        raise ValueError(usage_string)

    if (not source_account) and (not source_uri):
        # Set the source_client to None if neither source_account or source_uri is given. This
        # indicates the command that the source files share or blob container is in the same storage
        # account as the destination file share or blob container.
        #
        # The command itself should create the source service client since the validator can't
        # access the destination client through the namespace.
        #
        # A few arguments check will be made as well so as not to cause ambiguity.
        if source_key or source_sas:
            raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
                             ' same as the destination account. Do not provide --source-sas or --source-account-key')
        ns['source_client'] = None

        if 'token_credential' not in ns:  # not using oauth
            return
        # oauth is only possible through destination, must still get source creds
        source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']

    if source_account:
        if not (source_key or source_sas):
            # when neither storage account key or SAS is given, try to fetch the key in the current
            # subscription
            source_key = _query_account_key(cmd.cli_ctx, source_account)

        if source_container:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
        elif source_share:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
    elif source_uri:
        # source given as a URL: parse it to derive container/share and credentials
        if source_key or source_container or source_share:
            raise ValueError(usage_string)

        from .storage_url_helpers import StorageResourceIdentifier
        if source_sas:
            source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
        identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
        nor_container_or_share = not identifier.container and not identifier.share
        if not identifier.is_url():
            raise ValueError('incorrect usage: --source-uri expects a URI')
        elif identifier.blob or identifier.directory or \
                identifier.filename or nor_container_or_share:
            raise ValueError('incorrect usage: --source-uri has to be blob container or file share')

        if identifier.sas_token:
            ns['source_sas'] = identifier.sas_token
        else:
            source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)

        if identifier.container:
            ns['source_container'] = identifier.container
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
        elif identifier.share:
            ns['source_share'] = identifier.share
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
def add_progress_callback(cmd, namespace):
    """Attach a CLI progress-reporting callback unless --no-progress was passed."""
    def _report(current, total):
        controller = cmd.cli_ctx.get_progress_controller(det=True)
        if total:
            controller.add(message='Alive', value=current, total_val=total)
        if total == current:
            controller.end()

    if not namespace.no_progress:
        namespace.progress_callback = _report
    del namespace.no_progress
def process_container_delete_parameters(cmd, namespace):
    """Process the parameters for storage container delete command"""
    # check whether to use mgmt or data-plane
    if namespace.bypass_immutability_policy:
        # use management-plane: bypassing the immutability policy requires ARM, so
        # resolve the resource group and keep the mgmt client on the namespace
        namespace.processed_account_name = namespace.account_name
        namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
            cmd.cli_ctx, namespace.account_name)
        del namespace.auth_mode
    else:
        # use data-plane, like before
        validate_client_parameters(cmd, namespace)
def process_blob_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage blob download command"""
    import os

    # 1. quick check: the download target must be an existing local directory
    if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
        raise ValueError('incorrect usage: destination must be an existing directory')

    # 2. try to extract account name and container name from source string
    _process_blob_batch_container_parameters(cmd, namespace)

    # 3. Call validators
    add_progress_callback(cmd, namespace)
def process_blob_upload_batch_parameters(cmd, namespace):
    """Process the source and destination of storage blob upload command"""
    import os

    # 1. quick check: the upload source must be an existing local directory
    if not os.path.exists(namespace.source) or not os.path.isdir(namespace.source):
        raise ValueError('incorrect usage: source must be an existing directory')

    # 2. try to extract account name and container name from destination string
    _process_blob_batch_container_parameters(cmd, namespace, source=False)

    # 3. collect the files to be uploaded
    namespace.source = os.path.realpath(namespace.source)
    namespace.source_files = [c for c in glob_files_locally(namespace.source, namespace.pattern)]

    # 4. determine blob type: only default it when every matched file agrees
    if namespace.blob_type is None:
        vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
        if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
            # when all the listed files are vhd files use page
            namespace.blob_type = 'page'
        elif any(vhd_files):
            # source files contain vhd files but not all of them
            from knack.util import CLIError
            raise CLIError("""Fail to guess the required blob type. Type of the files to be
            uploaded are not consistent. Default blob type for .vhd files is "page", while
            others are "block". You can solve this problem by either explicitly set the blob
            type or ensure the pattern matches a correct set of files.""")
        else:
            namespace.blob_type = 'block'

    # 5. call other validators
    validate_metadata(namespace)
    t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
    get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
    add_progress_callback(cmd, namespace)
def process_blob_delete_batch_parameters(cmd, namespace):
    """Resolve container/account parameters for blob delete-batch (shared batch logic)."""
    _process_blob_batch_container_parameters(cmd, namespace)
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
    """Process the container parameters for storage blob batch commands before populating args from environment.

    The container may be given as a plain name or as a URL; URLs may also carry the
    account name and a SAS token, which are propagated onto the namespace.
    """
    if source:
        container_arg, container_name_arg = 'source', 'source_container_name'
    else:
        # destination
        container_arg, container_name_arg = 'destination', 'destination_container_name'

    # try to extract account name and container name from source string
    from .storage_url_helpers import StorageResourceIdentifier
    container_arg_val = getattr(namespace, container_arg)  # either a url or name
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)

    if not identifier.is_url():
        setattr(namespace, container_name_arg, container_arg_val)
    elif identifier.blob:
        # a URL pointing at an individual blob is ambiguous for a batch command
        raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
    else:
        setattr(namespace, container_name_arg, identifier.container)
        if namespace.account_name is None:
            namespace.account_name = identifier.account_name
        elif namespace.account_name != identifier.account_name:
            raise ValueError('The given storage account name is not consistent with the '
                             'account name in the destination URL')

        # if no sas-token is given and the container url contains one, use it
        if not namespace.sas_token and identifier.sas_token:
            namespace.sas_token = identifier.sas_token

    # Finally, grab missing storage connection parameters from environment variables
    validate_client_parameters(cmd, namespace)
def process_file_upload_batch_parameters(cmd, namespace):
    """Process the parameters of storage file batch upload command"""
    import os

    # 1. quick check: the upload source must be an existing local directory
    if not os.path.exists(namespace.source):
        raise ValueError('incorrect usage: source {} does not exist'.format(namespace.source))

    if not os.path.isdir(namespace.source):
        raise ValueError('incorrect usage: source must be a directory')

    # 2. try to extract account name and container name from destination string
    from .storage_url_helpers import StorageResourceIdentifier
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
    if identifier.is_url():
        if identifier.filename or identifier.directory:
            raise ValueError('incorrect usage: destination must be a file share url')

        namespace.destination = identifier.share

        if not namespace.account_name:
            namespace.account_name = identifier.account_name

    namespace.source = os.path.realpath(namespace.source)
def process_file_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage file batch download command"""
    import os

    # 1. quick check: the download target must be an existing local directory
    if not os.path.exists(namespace.destination) or not os.path.isdir(namespace.destination):
        raise ValueError('incorrect usage: destination must be an existing directory')

    # 2. try to extract account name and share name from source string
    process_file_batch_source_parameters(cmd, namespace)
def process_file_batch_source_parameters(cmd, namespace):
    """Resolve a file batch source given as either a share name or a share URL."""
    from .storage_url_helpers import StorageResourceIdentifier
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.source)
    if identifier.is_url():
        if identifier.filename or identifier.directory:
            raise ValueError('incorrect usage: source should be either share URL or name')

        namespace.source = identifier.share

        if not namespace.account_name:
            namespace.account_name = identifier.account_name
def process_file_download_namespace(namespace):
    """Split the remote path and default the local destination file name when a
    directory (or nothing) is given as the download target."""
    import os

    get_file_path_validator()(namespace)

    dest = namespace.file_path
    if not dest:
        namespace.file_path = namespace.file_name
    elif os.path.isdir(dest):
        namespace.file_path = os.path.join(dest, namespace.file_name)
def process_metric_update_namespace(namespace):
    """Validate and normalize metrics-update arguments.

    Converts the 'true'/'false' string flags to booleans, enforces that at least one
    of --hour/--minute was supplied, and requires --api whenever hour or minute
    metrics are being changed. Raises argparse.ArgumentError on incorrect usage.
    """
    import argparse
    # Capture "not supplied" BEFORE the string->bool conversion below. The original
    # check ran after conversion, at which point the values could no longer be None,
    # so the 'must specify --hour and/or --minute' error was unreachable.
    if namespace.hour is None and namespace.minute is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: must specify --hour and/or --minute')
    namespace.hour = namespace.hour == 'true'
    namespace.minute = namespace.minute == 'true'
    namespace.api = namespace.api == 'true' if namespace.api else None
    if (namespace.hour or namespace.minute) and namespace.api is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: specify --api when hour or minute metrics are enabled')
def validate_subnet(cmd, namespace):
    """Resolve --subnet/--vnet-name into a full subnet resource ID.

    Accepts either a complete subnet resource ID on its own, or a subnet name
    combined with --vnet-name; any other combination is a usage error.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id

    subnet = namespace.subnet
    vnet = namespace.vnet_name
    subnet_is_id = is_valid_resource_id(subnet)

    # full ID alone, or nothing supplied at all: nothing to resolve
    if (subnet_is_id and not vnet) or (not subnet and not vnet):
        return

    if subnet and not subnet_is_id and vnet:
        namespace.subnet = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='virtualNetworks',
            name=vnet,
            child_type_1='subnets',
            child_name_1=subnet)
        return

    from knack.util import CLIError
    raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
def get_datetime_type(to_string):
    """ Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 """
    from datetime import datetime

    accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
                             '%Y-%m-%dT%HZ', '%Y-%m-%d']

    def datetime_type(string):
        """Parse *string* against the accepted UTC formats, most precise first."""
        for fmt in accepted_date_formats:
            try:
                parsed = datetime.strptime(string, fmt)
            except ValueError:
                continue
            # round-trip through strftime when a normalized string is wanted
            return parsed.strftime(fmt) if to_string else parsed
        raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))

    return datetime_type
def ipv4_range_type(string):
    """ Validates an IPv4 address or address range.

    Accepts 'a.b.c.d' or 'a.b.c.d-e.f.g.h' and returns the string unchanged;
    raises ValueError (with a descriptive message) otherwise.
    """
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    if not re.match("^{}$".format(ip_format), string):
        if not re.match("^{}-{}$".format(ip_format, ip_format), string):
            # include the offending value; the original raised a bare ValueError
            # with no message, which is unhelpful outside of argparse
            raise ValueError("Invalid IPv4 address or address range: '{}'".format(string))
    return string
def resource_type_type(loader):
    """ Returns a function which validates that resource types string contains only a combination of service,
    container, and object. Their shorthand representations are s, c, and o. """
    def impl(string):
        t_resources = loader.get_models('common.models#ResourceTypes')
        unknown = set(string) - set('sco')
        if unknown:
            raise ValueError
        # De-duplicate the flag characters before handing them to the SDK model.
        return t_resources(_str=''.join(set(string)))
    return impl
def services_type(loader):
    """ Returns a function which validates that services string contains only a combination of blob, queue, table,
    and file. Their shorthand representations are b, q, t, and f. """
    def impl(string):
        t_services = loader.get_models('common.models#Services')
        unknown = set(string) - set('bqtf')
        if unknown:
            raise ValueError
        # De-duplicate the flag characters before handing them to the SDK model.
        return t_services(_str=''.join(set(string)))
    return impl
def get_char_options_validator(types, property_name):
    """Build a namespace validator ensuring *property_name* contains only
    characters drawn from *types*, storing the result back as a set."""
    def _validator(namespace):
        option_flag = '--{}'.format(property_name.replace('_', '-'))
        chosen = set(getattr(namespace, property_name, list()))
        if not chosen:
            raise ValueError('Missing options {}.'.format(option_flag))
        invalid = chosen - set(types)
        if invalid:
            raise ValueError(
                '{}: only valid values are: {}.'.format(option_flag, ', '.join(types)))
        setattr(namespace, property_name, chosen)
    return _validator
def page_blob_tier_validator(cmd, namespace):
    """Resolve a premium page-blob tier name to its SDK enum value on the namespace."""
    if not namespace.tier:
        return
    # A tier was supplied, so the blob type must be 'page'.
    if namespace.blob_type != 'page':
        raise ValueError('Blob tier is only applicable to page blobs on premium storage accounts.')
    try:
        namespace.tier = getattr(cmd.get_models('blob.models#PremiumPageBlobTier'), namespace.tier)
    except AttributeError:
        from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
        valid_names = get_blob_tier_names(cmd.cli_ctx, 'PremiumPageBlobTier')
        raise ValueError('Unknown premium page blob tier name. Choose among {}'.format(', '.join(valid_names)))
def block_blob_tier_validator(cmd, namespace):
    """Resolve a standard block-blob tier name to its SDK enum value on the namespace."""
    if not namespace.tier:
        return
    # A tier was supplied, so the blob type must be 'block'.
    if namespace.blob_type != 'block':
        raise ValueError('Blob tier is only applicable to block blobs on standard storage accounts.')
    try:
        namespace.tier = getattr(cmd.get_models('blob.models#StandardBlobTier'), namespace.tier)
    except AttributeError:
        from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
        valid_names = get_blob_tier_names(cmd.cli_ctx, 'StandardBlobTier')
        raise ValueError('Unknown block blob tier name. Choose among {}'.format(', '.join(valid_names)))
def blob_tier_validator(cmd, namespace):
    """Dispatch tier validation to the page- or block-blob specific validator."""
    dispatch = {
        'page': page_blob_tier_validator,
        'block': block_blob_tier_validator,
    }
    handler = dispatch.get(namespace.blob_type)
    if handler is None:
        raise ValueError('Blob tier is only applicable to block or page blob.')
    handler(cmd, namespace)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
from knack.log import get_logger
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.validators import validate_key_value_pairs
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.storage._client_factory import get_storage_data_service_client
from azure.cli.command_modules.storage.util import glob_files_locally, guess_content_type
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
from azure.cli.command_modules.storage.url_quote_util import encode_for_url
from azure.cli.command_modules.storage.oauth_token_util import TokenUpdater
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
# Utilities
# pylint: disable=inconsistent-return-statements,too-many-lines
def _query_account_key(cli_ctx, account_name):
    """Query the storage account key. This is used when the customer doesn't offer account key but name."""
    rg, scf = _query_account_rg(cli_ctx, account_name)
    t_storage_account_keys = get_sdk(
        cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
    keys_result = scf.storage_accounts.list_keys(rg, account_name)
    # Older profiles expose a StorageAccountKeys model with key1/key2 attributes;
    # newer ones return a StorageAccountListKeysResult with a .keys list.
    if t_storage_account_keys:
        return keys_result.key1
    return keys_result.keys[0].value  # pylint: disable=no-member
def _query_account_rg(cli_ctx, account_name):
    """Query the storage account's resource group, which the mgmt sdk requires."""
    scf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
    # Scan the subscription's accounts for a name match; stop at the first hit.
    for account in scf.storage_accounts.list():
        if account.name == account_name:
            from msrestazure.tools import parse_resource_id
            return parse_resource_id(account.id)['resource_group'], scf
    raise ValueError("Storage account '{}' not found.".format(account_name))
def _create_token_credential(cli_ctx):
    """Build a data-plane TokenCredential kept fresh by a background updater
    that is cancelled once the CLI command finishes executing."""
    from knack.cli import EVENT_CLI_POST_EXECUTE
    t_token_credential = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#TokenCredential')
    credential = t_token_credential()
    updater = TokenUpdater(credential, cli_ctx)

    def _cancel_timer_event_handler(_, **__):
        # Stop the refresh timer so the process can exit cleanly.
        updater.cancel()

    cli_ctx.register_event(EVENT_CLI_POST_EXECUTE, _cancel_timer_event_handler)
    return credential
# region PARAMETER VALIDATORS
def process_resource_group(cmd, namespace):
    """Processes the resource group parameter from the account name"""
    # Only look the group up when an account is named and no group was given.
    if namespace.resource_group_name or not namespace.account_name:
        return
    namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def validate_table_payload_format(cmd, namespace):
    """Map the user-facing accept level (none/minimal/full) to the SDK payload-format constant."""
    t_table_payload = get_table_data_type(cmd.cli_ctx, 'table', 'TablePayloadFormat')
    if not namespace.accept:
        return
    lookup = {
        'none': t_table_payload.JSON_NO_METADATA,
        'minimal': t_table_payload.JSON_MINIMAL_METADATA,
        'full': t_table_payload.JSON_FULL_METADATA,
    }
    namespace.accept = lookup[namespace.accept.lower()]
def validate_bypass(namespace):
    """Collapse a list of bypass values into the comma-separated string the service expects."""
    bypass = namespace.bypass
    if not bypass:
        return
    if isinstance(bypass, list):
        bypass = ', '.join(bypass)
    namespace.bypass = bypass
def validate_client_parameters(cmd, namespace):
    """ Retrieves storage connection parameters from environment variables and parses out connection string into
    account name and key.

    Resolution order: explicit arguments, then CLI config ('az configure'),
    then — for key-less account names — a management-plane key query. In
    'login' auth mode only an OAuth token credential is attached and any
    key/SAS/connection-string arguments are ignored with a warning. """
    n = namespace
    def get_config_value(section, key, default):
        # Fall back to the CLI config file when the argument was omitted.
        return cmd.cli_ctx.config.get(section, key, default)
    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value('storage', 'account', None)
        if auth_mode == 'login':
            # AAD auth: build a token credential and skip key/SAS resolution entirely.
            n.token_credential = _create_token_credential(cmd.cli_ctx)
            # give warning if there are account key args being ignored
            account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
                                n.connection_string and "--connection-string"]
            account_key_args = [arg for arg in account_key_args if arg]
            if account_key_args:
                logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                               ' ,'.join(account_key_args))
            return
    if not n.connection_string:
        n.connection_string = get_config_value('storage', 'connection_string', None)
    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        n.sas_token = conn_dict.get('SharedAccessSignature')
    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        n.account_name = get_config_value('storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value('storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value('storage', 'sas_token', None)
    # strip the '?' from sas token; the portal and the command line return
    # sas tokens in different forms
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')
    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
def process_blob_source_uri(cmd, namespace):
    """
    Validate the parameters referenced to a blob source and create the source URI from them.

    On success the assembled URI (including any SAS and snapshot query
    parameters) is stored back on ``namespace.copy_source``; the individual
    source_* arguments are popped from the namespace so they are not forwarded
    to the SDK call.
    """
    from .util import create_short_lived_blob_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '
    ns = vars(namespace)
    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)
    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    sas = ns.pop('source_sas', None)
    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        else:
            # simplest scenario--no further processing necessary
            return
    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account
    # determine if the copy will happen in the same storage account
    if not source_account_name and source_account_key:
        raise ValueError(usage_string.format('Source account key is given but account name is not'))
    elif not source_account_name and not source_account_key:
        # neither source account name or key is given, assume that user intends to copy blob in
        # the same account
        source_account_name = ns.get('account_name', None)
        source_account_key = ns.get('account_key', None)
    elif source_account_name and not source_account_key:
        if source_account_name == ns.get('account_name', None):
            # the source account name is same as the destination account name
            source_account_key = ns.get('account_key', None)
        else:
            # the source account is different from destination account but the key is missing
            # try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # else: both source account name and key are given by user
    if not source_account_name:
        raise ValueError(usage_string.format('Storage account name not found'))
    if not sas:
        # no SAS supplied: mint a short-lived one from the resolved account key
        sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
    query_params = []
    if sas:
        query_params.append(sas)
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))
    # assemble https://<account>.blob.<endpoint-suffix>/<container>/<blob>?<params>
    uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
                                                cmd.cli_ctx.cloud.suffixes.storage_endpoint,
                                                container,
                                                blob,
                                                '?' if query_params else '',
                                                '&'.join(query_params))
    namespace.copy_source = uri
def validate_source_uri(cmd, namespace):  # pylint: disable=too-many-statements
    """Validate a blob-or-file copy source and assemble ``namespace.copy_source``.

    Accepts a ready-made URI (optionally with --source-sas), a
    container+blob pair, or a share+path pair; mints a short-lived SAS when
    one is needed and none was supplied. All source_* arguments are popped
    from the namespace so they are not forwarded to the SDK call.
    """
    from .util import create_short_lived_blob_sas, create_short_lived_file_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri [--source-sas]' \
        '\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
        '\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
        '\n\tOR --source-share --source-path' \
        '\n\tOR --source-share --source-path [--source-account-name & sas]' \
        '\n\tOR --source-share --source-path [--source-account-name & key]'
    ns = vars(namespace)
    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)
    # source as file
    share = ns.pop('source_share', None)
    path = ns.pop('source_path', None)
    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    source_sas = ns.pop('source_sas', None)
    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, snapshot, share, path, source_account_name,
                source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        if source_sas:
            # append the caller-supplied SAS onto the ready-made URI
            source_sas = source_sas.lstrip('?')
            uri = '{}{}{}'.format(uri, '?', source_sas)
        namespace.copy_source = uri
        return
    # ensure either a file or blob source is specified
    valid_blob_source = container and blob and not share and not path
    valid_file_source = share and path and not container and not blob and not snapshot
    if not valid_blob_source and not valid_file_source:
        raise ValueError(usage_string.format('Neither a valid blob or file source is specified'))
    elif valid_blob_source and valid_file_source:
        raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
                                             'specified'))
    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account
    if not source_account_name:
        if source_account_key:
            raise ValueError(usage_string.format('Source account key is given but account name is not'))
        # assume that user intends to copy blob in the same account
        source_account_name = ns.get('account_name', None)
    # determine if the copy will happen in the same storage account
    same_account = False
    if not source_account_key and not source_sas:
        if source_account_name == ns.get('account_name', None):
            same_account = True
            source_account_key = ns.get('account_key', None)
            source_sas = ns.get('sas_token', None)
        else:
            # the source account is different from destination account but the key is missing try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # Both source account name and either key or sas (or both) are now available
    if not source_sas:
        # generate a sas token even in the same account when the source and destination are not the same kind.
        if valid_file_source and (ns.get('container_name', None) or not same_account):
            import os
            dir_name, file_name = os.path.split(path) if path else (None, '')
            source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
                                                     dir_name, file_name)
        elif valid_blob_source and (ns.get('share_name', None) or not same_account):
            source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
    query_params = []
    if source_sas:
        query_params.append(source_sas.lstrip('?'))
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))
    # assemble https://<account>.<blob|file>.<endpoint-suffix>/<container|share>/<name>?<params>
    uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
        source_account_name,
        'blob' if valid_blob_source else 'file',
        container if valid_blob_source else share,
        encode_for_url(blob if valid_blob_source else path),
        '?' if query_params else '',
        '&'.join(query_params),
        cmd.cli_ctx.cloud.suffixes.storage_endpoint)
    namespace.copy_source = uri
def validate_blob_type(namespace):
    """Default the blob type from the upload path: .vhd files become page blobs."""
    if namespace.blob_type:
        return
    is_vhd = namespace.file_path.endswith('.vhd')
    namespace.blob_type = 'page' if is_vhd else 'block'
def validate_storage_data_plane_list(namespace):
    """Translate the '*' sentinel into 'no limit'; otherwise coerce num_results to int."""
    raw = namespace.num_results
    namespace.num_results = None if raw == '*' else int(raw)
def get_content_setting_validator(settings_class, update, guess_from_file=None):
    """Build a validator that assembles a ContentSettings object on the namespace.

    For *update* operations the existing settings are fetched from the service
    first so that unspecified values are preserved; for create operations the
    content type may optionally be guessed from the file named by
    *guess_from_file*.
    """
    def _class_name(class_type):
        # NOTE(review): for a class argument, __class__.__name__ is the
        # metaclass name, so this effectively keys on the module path —
        # confirm that is the intent before changing.
        return class_type.__module__ + "." + class_type.__class__.__name__
    def validator(cmd, namespace):
        t_base_blob_service, t_file_service, t_blob_content_settings, t_file_content_settings = cmd.get_models(
            'blob.baseblobservice#BaseBlobService',
            'file#FileService',
            'blob.models#ContentSettings',
            'file.models#ContentSettings')
        # must run certain validators first for an update
        if update:
            validate_client_parameters(cmd, namespace)
        if update and _class_name(settings_class) == _class_name(t_file_content_settings):
            get_file_path_validator()(namespace)
        ns = vars(namespace)
        # retrieve the existing object properties for an update
        if update:
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            if _class_name(settings_class) == _class_name(t_blob_content_settings):
                client = get_storage_data_service_client(cmd.cli_ctx,
                                                         t_base_blob_service,
                                                         account,
                                                         key,
                                                         cs,
                                                         sas)
                container = ns.get('container_name')
                blob = ns.get('blob_name')
                lease_id = ns.get('lease_id')
                props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings
            elif _class_name(settings_class) == _class_name(t_file_content_settings):
                client = get_storage_data_service_client(cmd.cli_ctx, t_file_service, account, key, cs, sas)
                share = ns.get('share_name')
                directory = ns.get('directory_name')
                filename = ns.get('file_name')
                props = client.get_file_properties(share, directory, filename).properties.content_settings
        # create new properties
        new_props = settings_class(
            content_type=ns.pop('content_type', None),
            content_disposition=ns.pop('content_disposition', None),
            content_encoding=ns.pop('content_encoding', None),
            content_language=ns.pop('content_language', None),
            content_md5=ns.pop('content_md5', None),
            cache_control=ns.pop('content_cache_control', None)
        )
        # if update, fill in any None values with existing
        if update:
            for attr in ['content_type', 'content_disposition', 'content_encoding', 'content_language', 'content_md5',
                         'cache_control']:
                if getattr(new_props, attr) is None:
                    setattr(new_props, attr, getattr(props, attr))
        else:
            if guess_from_file:
                new_props = guess_content_type(ns[guess_from_file], new_props, settings_class)
        ns['content_settings'] = new_props
    return validator
def validate_custom_domain(namespace):
    """Reject --use-subdomain when no --custom-domain was supplied."""
    missing_domain = not namespace.custom_domain
    if namespace.use_subdomain and missing_domain:
        raise ValueError('usage error: --custom-domain DOMAIN [--use-subdomain]')
def validate_encryption_services(cmd, namespace):
    """
    Builds up the encryption services object for storage account operations based on the list of services passed in.
    """
    if not namespace.encryption_services:
        return
    t_encryption_services, t_encryption_service = get_sdk(
        cmd.cli_ctx, ResourceType.MGMT_STORAGE, 'EncryptionServices', 'EncryptionService', mod='models')
    # Mark each named service (e.g. blob, file) as enabled.
    enabled = {name: t_encryption_service(enabled=True) for name in namespace.encryption_services}
    namespace.encryption_services = t_encryption_services(**enabled)
def validate_encryption_source(cmd, namespace):
    """Validate the Key Vault encryption arguments and build KeyVaultProperties when applicable."""
    ns = vars(namespace)
    # Pop the raw arguments so they are not forwarded to the SDK call directly.
    key_name = ns.pop('encryption_key_name', None)
    key_version = ns.pop('encryption_key_version', None)
    key_vault_uri = ns.pop('encryption_key_vault', None)
    uses_keyvault = namespace.encryption_key_source == 'Microsoft.Keyvault'
    all_key_args = key_name and key_version and key_vault_uri
    any_key_args = key_name or key_version or key_vault_uri
    if uses_keyvault and not all_key_args:
        raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are required '
                         'when --encryption-key-source=Microsoft.Keyvault is specified.')
    if not any_key_args:
        return
    if not uses_keyvault:
        raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are not '
                         'applicable when --encryption-key-source=Microsoft.Keyvault is not specified.')
    KeyVaultProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE, 'KeyVaultProperties',
                                 mod='models')
    # Older API profiles do not expose the model at all.
    if not KeyVaultProperties:
        return
    namespace.encryption_key_vault_properties = KeyVaultProperties(
        key_name=key_name, key_version=key_version, key_vault_uri=key_vault_uri)
def validate_entity(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    RowKey and PartitionKey are converted to the correct case and included. """
    values = dict(x.split('=', 1) for x in namespace.entity)
    # Iterate over a snapshot of the keys: renaming an entry deletes and
    # re-inserts keys, and mutating a dict while iterating its live keys view
    # raises RuntimeError on Python 3.
    for key in list(values.keys()):
        if key.lower() == 'rowkey':
            val = values[key]
            del values[key]
            values['RowKey'] = val
        elif key.lower() == 'partitionkey':
            val = values[key]
            del values[key]
            values['PartitionKey'] = val
    keys = values.keys()
    # Both RowKey and PartitionKey are mandatory for a table entity.
    missing_keys = 'RowKey ' if 'RowKey' not in keys else ''
    missing_keys = '{}PartitionKey'.format(missing_keys) \
        if 'PartitionKey' not in keys else missing_keys
    if missing_keys:
        import argparse
        raise argparse.ArgumentError(
            None, 'incorrect usage: entity requires: {}'.format(missing_keys))

    def cast_val(key, val):
        """ Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
        can be queried correctly. """
        if key in ['PartitionKey', 'RowKey']:
            return val

        def try_cast(to_type):
            try:
                return to_type(val)
            except ValueError:
                return None
        # NOTE(review): zero values ('0', '0.0') are falsy after casting and so
        # fall through to the raw string — preexisting quirk, left unchanged.
        return try_cast(int) or try_cast(float) or val

    # ensure numbers are converted from strings so querying will work correctly
    values = {key: cast_val(key, val) for key, val in values.items()}
    namespace.entity = values
def validate_marker(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    nextrowkey and nextpartitionkey are included. """
    if not namespace.marker:
        return
    marker = dict(x.split('=', 1) for x in namespace.marker)
    expected_keys = {'nextrowkey', 'nextpartitionkey'}
    # Iterate over a snapshot of the keys: normalizing the key case deletes and
    # re-inserts entries, and mutating a dict while iterating it live raises
    # RuntimeError on Python 3.
    for key in list(marker):
        new_key = key.lower()
        if new_key in expected_keys:
            expected_keys.remove(new_key)
            val = marker[key]
            del marker[key]
            marker[new_key] = val
    if expected_keys:
        import argparse
        raise argparse.ArgumentError(
            None, 'incorrect usage: marker requires: {}'.format(' '.join(expected_keys)))
    namespace.marker = marker
def get_file_path_validator(default_file_param=None):
    """ Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
    Allows another path-type parameter to be named which can supply a default filename. """
    def validator(namespace):
        import os
        if not hasattr(namespace, 'path'):
            return
        path = namespace.path
        if path:
            dir_name, file_name = os.path.split(path)
        else:
            dir_name, file_name = None, ''
        # When the split produced no extension, treat the whole path as a
        # directory and borrow the filename from the fallback parameter.
        if default_file_param and '.' not in file_name:
            dir_name = path
            file_name = os.path.split(getattr(namespace, default_file_param))[1]
        namespace.directory_name = None if dir_name in ('', '.') else dir_name
        namespace.file_name = file_name
        del namespace.path
    return validator
def validate_included_datasets(cmd, namespace):
    """Translate the 'cmsd' shorthand string into the SDK blob Include object."""
    if not namespace.include:
        return
    include = namespace.include
    if set(include) - set('cmsd'):
        help_string = '(c)opy-info (m)etadata (s)napshots (d)eleted'
        raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
    t_blob_include = cmd.get_models('blob#Include')
    # positional args: snapshots, metadata, uncommitted_blobs, copy, deleted
    namespace.include = t_blob_include('s' in include, 'm' in include, False, 'c' in include, 'd' in include)
def validate_key(namespace):
    # Map the user-facing key alias ('primary'/'secondary') to the service key
    # name ('key1'/'key2') via the module-level storage_account_key_options table.
    namespace.key_name = storage_account_key_options[namespace.key_name]
def validate_metadata(namespace):
    """Parse 'key=value' metadata arguments into a dict (the value may itself contain '=')."""
    if not namespace.metadata:
        return
    namespace.metadata = dict(item.split('=', 1) for item in namespace.metadata)
def get_permission_help_string(permission_class):
    """Render e.g. '(r)ead (w)rite' from the permission class's public attribute names."""
    names = [attr.lower() for attr in dir(permission_class) if not attr.startswith('__')]
    return ' '.join('({}){}'.format(name[0], name[1:]) for name in names)
def get_permission_validator(permission_class):
    """Build a validator turning a shorthand permission string into a permission_class instance.

    The allowed shorthand characters are the first letters of the class's
    public attribute names.
    """
    names = [attr.lower() for attr in dir(permission_class) if not attr.startswith('__')]
    allowed_chars = ''.join(name[0] for name in names)

    def validator(namespace):
        if not namespace.permission:
            return
        if set(namespace.permission) - set(allowed_chars):
            help_string = get_permission_help_string(permission_class)
            raise ValueError(
                'valid values are {} or a combination thereof.'.format(help_string))
        namespace.permission = permission_class(_str=namespace.permission)
    return validator
def table_permission_validator(cmd, namespace):
    """ A special case for table because the SDK associates the QUERY permission with 'r' """
    t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
    if not namespace.permission:
        return
    if set(namespace.permission) - set('raud'):
        help_string = '(r)ead/query (a)dd (u)pdate (d)elete'
        raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
    namespace.permission = t_table_permissions(_str=namespace.permission)
def validate_container_public_access(cmd, namespace):
    """Resolve the public-access level string to its SDK constant and, for
    set-permission operations, pre-fetch the container's existing ACL so the
    call behaves like a patch rather than a replace."""
    from .sdkutil import get_container_access_type
    t_base_blob_svc = cmd.get_models('blob.baseblobservice#BaseBlobService')
    if namespace.public_access:
        namespace.public_access = get_container_access_type(cmd.cli_ctx, namespace.public_access.lower())
        if hasattr(namespace, 'signed_identifiers'):
            # must retrieve the existing ACL to simulate a patch operation because these calls
            # are needlessly conflated
            ns = vars(namespace)
            validate_client_parameters(cmd, namespace)
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            client = get_storage_data_service_client(cmd.cli_ctx, t_base_blob_svc, account, key, cs, sas)
            container = ns.get('container_name')
            lease_id = ns.get('lease_id')
            ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def validate_select(namespace):
    """Join the selected property names into the comma-separated string the service expects."""
    selected = namespace.select
    if not selected:
        return
    namespace.select = ','.join(selected)
# pylint: disable=too-many-statements
def get_source_file_or_blob_service_client(cmd, namespace):
    """
    Create the second file service or blob service client for batch copy command, which is used to
    list the source files or blobs. If both the source account and source URI are omitted, it
    indicates that user want to copy files or blobs in the same storage account, therefore the
    destination client will be set None hence the command will use destination client.

    The resolved client (or None) is stored at namespace.source_client.
    """
    t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
    usage_string = 'invalid usage: supply only one of the following argument sets:' + \
                   '\n\t --source-uri [--source-sas]' + \
                   '\n\tOR --source-container' + \
                   '\n\tOR --source-container --source-account-name --source-account-key' + \
                   '\n\tOR --source-container --source-account-name --source-sas' + \
                   '\n\tOR --source-share --source-account-name --source-account-key' + \
                   '\n\tOR --source-share --source-account-name --source-account-sas'
    ns = vars(namespace)
    source_account = ns.pop('source_account_name', None)
    source_key = ns.pop('source_account_key', None)
    source_uri = ns.pop('source_uri', None)
    source_sas = ns.get('source_sas', None)
    source_container = ns.get('source_container', None)
    source_share = ns.get('source_share', None)
    if source_uri and source_account:
        raise ValueError(usage_string)
    if not source_uri and bool(source_container) == bool(source_share):  # must be container or share
        raise ValueError(usage_string)
    if (not source_account) and (not source_uri):
        # Set the source_client to None if neither source_account or source_uri is given. This
        # indicates the command that the source files share or blob container is in the same storage
        # account as the destination file share or blob container.
        #
        # The command itself should create the source service client since the validator can't
        # access the destination client through the namespace.
        #
        # A few arguments check will be made as well so as not to cause ambiguity.
        if source_key or source_sas:
            raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
                             ' same as the destination account. Do not provide --source-sas or --source-account-key')
        ns['source_client'] = None
        if 'token_credential' not in ns:  # not using oauth
            return
        # oauth is only possible through destination, must still get source creds
        source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']
    if source_account:
        if not (source_key or source_sas):
            # when neither storage account key or SAS is given, try to fetch the key in the current
            # subscription
            source_key = _query_account_key(cmd.cli_ctx, source_account)
        if source_container:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
        elif source_share:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
    elif source_uri:
        if source_key or source_container or source_share:
            raise ValueError(usage_string)
        from .storage_url_helpers import StorageResourceIdentifier
        if source_sas:
            source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
        identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
        nor_container_or_share = not identifier.container and not identifier.share
        if not identifier.is_url():
            raise ValueError('incorrect usage: --source-uri expects a URI')
        elif identifier.blob or identifier.directory or \
                identifier.filename or nor_container_or_share:
            raise ValueError('incorrect usage: --source-uri has to be blob container or file share')
        if identifier.sas_token:
            ns['source_sas'] = identifier.sas_token
        else:
            # no SAS embedded in the URI: fall back to querying the account key
            source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)
        if identifier.container:
            ns['source_container'] = identifier.container
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
        elif identifier.share:
            ns['source_share'] = identifier.share
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
def add_progress_callback(cmd, namespace):
    """Attach a progress-reporting callback to the namespace unless --no-progress was given."""
    def _report(current, total):
        controller = cmd.cli_ctx.get_progress_controller(det=True)
        if total:
            controller.add(message='Alive', value=current, total_val=total)
        if total == current:
            controller.end()

    if not namespace.no_progress:
        namespace.progress_callback = _report
    # The flag itself must not be forwarded to the SDK call.
    del namespace.no_progress
def process_container_delete_parameters(cmd, namespace):
    """Process the parameters for storage container delete command"""
    # decide between the data-plane and management-plane code paths
    if not namespace.bypass_immutability_policy:
        # data-plane: resolve storage connection parameters as usual
        validate_client_parameters(cmd, namespace)
        return
    # management-plane: resolve the resource group and management client instead
    namespace.processed_account_name = namespace.account_name
    namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
        cmd.cli_ctx, namespace.account_name)
    del namespace.auth_mode
def process_blob_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage blob download command"""
    import os
    destination = namespace.destination
    # the download target must be a directory that already exists
    if not (os.path.exists(destination) and os.path.isdir(destination)):
        raise ValueError('incorrect usage: destination must be an existing directory')
    # derive account/container information from the source string
    _process_blob_batch_container_parameters(cmd, namespace)
    # hook up progress reporting
    add_progress_callback(cmd, namespace)
def process_blob_upload_batch_parameters(cmd, namespace):
    """Process the source and destination of storage blob upload command"""
    import os
    # 1. quick check
    if not os.path.exists(namespace.source) or not os.path.isdir(namespace.source):
        raise ValueError('incorrect usage: source must be an existing directory')
    # 2. try to extract account name and container name from destination string
    _process_blob_batch_container_parameters(cmd, namespace, source=False)
    # 3. collect the files to be uploaded; each entry is indexable with the
    # local file path at position 0 (see f[0] below)
    namespace.source = os.path.realpath(namespace.source)
    namespace.source_files = [c for c in glob_files_locally(namespace.source, namespace.pattern)]
    # 4. determine blob type
    if namespace.blob_type is None:
        vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
        if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
            # when all the listed files are vhd files use page
            namespace.blob_type = 'page'
        elif any(vhd_files):
            # source files contain vhd files but not all of them
            from knack.util import CLIError
            raise CLIError("""Fail to guess the required blob type. Type of the files to be
            uploaded are not consistent. Default blob type for .vhd files is "page", while
            others are "block". You can solve this problem by either explicitly set the blob
            type or ensure the pattern matches a correct set of files.""")
        else:
            namespace.blob_type = 'block'
    # 5. call other validators
    validate_metadata(namespace)
    t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
    get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
    add_progress_callback(cmd, namespace)
def process_blob_delete_batch_parameters(cmd, namespace):
    """Resolve the source container/account arguments for the blob delete-batch command."""
    _process_blob_batch_container_parameters(cmd, namespace)
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
    """Process the container parameters for storage blob batch commands before populating args from environment.

    When ``source`` is True the ``source``/``source_container_name`` pair is
    resolved; otherwise the ``destination``/``destination_container_name``
    pair.  The argument may be a bare container name or a container URL; a
    URL also supplies the account name and SAS token when they are missing.
    """
    if source:
        container_arg, container_name_arg = 'source', 'source_container_name'
    else:
        # destination
        container_arg, container_name_arg = 'destination', 'destination_container_name'
    # try to extract account name and container name from source string
    from .storage_url_helpers import StorageResourceIdentifier
    container_arg_val = getattr(namespace, container_arg)  # either a url or name
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)
    if not identifier.is_url():
        setattr(namespace, container_name_arg, container_arg_val)
    elif identifier.blob:
        # A blob-level URL is too specific for a batch command.
        raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
    else:
        setattr(namespace, container_name_arg, identifier.container)
        if namespace.account_name is None:
            namespace.account_name = identifier.account_name
        elif namespace.account_name != identifier.account_name:
            raise ValueError('The given storage account name is not consistent with the '
                             'account name in the destination URL')
        # if no sas-token is given and the container url contains one, use it
        if not namespace.sas_token and identifier.sas_token:
            namespace.sas_token = identifier.sas_token
    # Finally, grab missing storage connection parameters from environment variables
    validate_client_parameters(cmd, namespace)
def process_file_upload_batch_parameters(cmd, namespace):
    """Validate the local source directory and resolve the destination share
    for the file upload-batch command."""
    import os
    src = namespace.source
    # Fail early when the source path is missing or is not a directory.
    if not os.path.exists(src):
        raise ValueError('incorrect usage: source {} does not exist'.format(src))
    if not os.path.isdir(src):
        raise ValueError('incorrect usage: source must be a directory')
    # A destination given as a URL must point at the share root; extract the
    # share name (and the account name, when not already supplied) from it.
    from .storage_url_helpers import StorageResourceIdentifier
    dest_id = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
    if dest_id.is_url():
        if dest_id.filename or dest_id.directory:
            raise ValueError('incorrect usage: destination must be a file share url')
        namespace.destination = dest_id.share
        if not namespace.account_name:
            namespace.account_name = dest_id.account_name
    namespace.source = os.path.realpath(src)
def process_file_download_batch_parameters(cmd, namespace):
    """Validate the local destination and resolve the source share for the
    file download-batch command."""
    import os
    dest = namespace.destination
    # The target directory must already exist; nothing is created here.
    if not (os.path.exists(dest) and os.path.isdir(dest)):
        raise ValueError('incorrect usage: destination must be an existing directory')
    # Pull account/share information out of the source argument.
    process_file_batch_source_parameters(cmd, namespace)
def process_file_batch_source_parameters(cmd, namespace):
    """Normalize a file-batch source given either as a share name or a share URL."""
    from .storage_url_helpers import StorageResourceIdentifier
    src_id = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.source)
    if src_id.is_url():
        # Only a bare share URL is acceptable -- no directory or file component.
        if src_id.filename or src_id.directory:
            raise ValueError('incorrect usage: source should be either share URL or name')
        namespace.source = src_id.share
        if not namespace.account_name:
            namespace.account_name = src_id.account_name
def process_file_download_namespace(namespace):
    """Fill in the local download path, appending the remote file name when the
    target is missing or is a directory."""
    import os
    get_file_path_validator()(namespace)
    target = namespace.file_path
    if not target:
        namespace.file_path = namespace.file_name
    elif os.path.isdir(target):
        namespace.file_path = os.path.join(target, namespace.file_name)
def process_metric_update_namespace(namespace):
    """Normalize metric-update flags on *namespace*.

    Converts the ``hour``/``minute``/``api`` string flags ('true'/'false')
    into booleans and enforces the argument rules:

    * at least one of --hour / --minute must be supplied;
    * --api must be supplied when hour or minute metrics are enabled.

    :raises argparse.ArgumentError: when the rules above are violated.
    """
    import argparse
    # Validate presence BEFORE the boolean conversion: converting first would
    # turn a missing (None) flag into False and make this check unreachable,
    # which is exactly the bug the previous ordering had.
    if namespace.hour is None and namespace.minute is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: must specify --hour and/or --minute')
    namespace.hour = namespace.hour == 'true'
    namespace.minute = namespace.minute == 'true'
    namespace.api = namespace.api == 'true' if namespace.api else None
    if (namespace.hour or namespace.minute) and namespace.api is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: specify --api when hour or minute metrics are enabled')
def validate_subnet(cmd, namespace):
    """Expand a subnet name plus --vnet-name into a full subnet resource ID.

    Accepted combinations: a full subnet resource ID alone, nothing at all,
    or a subnet name together with --vnet-name (expanded into an ID in the
    current subscription).  Any other combination is a usage error.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    subnet = namespace.subnet
    subnet_is_id = is_valid_resource_id(subnet)
    vnet = namespace.vnet_name
    if (subnet_is_id and not vnet) or (not subnet and not vnet):
        # Full ID supplied, or nothing supplied -- nothing to expand.
        return
    elif subnet and not subnet_is_id and vnet:
        # Name + vnet: build the full resource ID.
        namespace.subnet = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='virtualNetworks',
            name=vnet,
            child_type_1='subnets',
            child_name_1=subnet)
    else:
        # e.g. --vnet-name without --subnet, or an ID combined with --vnet-name.
        from knack.util import CLIError
        raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
def get_datetime_type(to_string):
    """Build an argparse type callable that parses a UTC datetime.

    Accepted forms: 2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z
    or 2017-12-31.  When *to_string* is truthy the parsed value is echoed
    back as a normalized string; otherwise a ``datetime`` object is returned.
    """
    from datetime import datetime

    def datetime_type(string):
        """Parse *string* against the accepted UTC datetime layouts."""
        for fmt in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
                    '%Y-%m-%dT%HZ', '%Y-%m-%d'):
            try:
                parsed = datetime.strptime(string, fmt)
            except ValueError:
                continue
            return parsed.strftime(fmt) if to_string else parsed
        raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
    return datetime_type
def ipv4_range_type(string):
    """Validate an IPv4 address or a dash-separated IPv4 address range.

    Returns the input unchanged when it matches ``d.d.d.d`` or
    ``d.d.d.d-d.d.d.d``; raises ``ValueError`` otherwise.  The error now
    carries a message so argparse surfaces a useful diagnostic instead of a
    bare 'invalid value'.
    """
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    if not re.match("^{}$".format(ip_format), string):
        if not re.match("^{}-{}$".format(ip_format, ip_format), string):
            raise ValueError(
                "Invalid IP address or range '{}'. Expected 'd.d.d.d' or 'd.d.d.d-d.d.d.d'.".format(string))
    return string
def resource_type_type(loader):
    """Return a validator ensuring a resource-types string is a combination of
    the 's' (service), 'c' (container) and 'o' (object) shorthands."""
    def impl(string):
        t_resources = loader.get_models('common.models#ResourceTypes')
        invalid = set(string) - set("sco")
        if invalid:
            raise ValueError
        return t_resources(_str=''.join(set(string)))
    return impl
def services_type(loader):
    """Return a validator ensuring a services string is a combination of the
    'b' (blob), 'q' (queue), 't' (table) and 'f' (file) shorthands."""
    def impl(string):
        t_services = loader.get_models('common.models#Services')
        invalid = set(string) - set("bqtf")
        if invalid:
            raise ValueError
        return t_services(_str=''.join(set(string)))
    return impl
def get_char_options_validator(types, property_name):
    """Build a namespace validator checking that *property_name* holds only
    characters from *types*, and at least one of them.  The attribute is
    normalized to a set on success."""
    def _validator(namespace):
        selected = set(getattr(namespace, property_name, list()))
        flag = property_name.replace('_', '-')
        if not selected:
            raise ValueError('Missing options --{}.'.format(flag))
        unknown = selected - set(types)
        if unknown:
            raise ValueError(
                '--{}: only valid values are: {}.'.format(flag, ', '.join(types)))
        setattr(namespace, property_name, selected)
    return _validator
def page_blob_tier_validator(cmd, namespace):
    """Translate the --tier argument into a PremiumPageBlobTier enum member."""
    tier = namespace.tier
    if not tier:
        return
    # Premium tiers apply to page blobs only.
    if namespace.blob_type != 'page':
        raise ValueError('Blob tier is only applicable to page blobs on premium storage accounts.')
    tier_model = cmd.get_models('blob.models#PremiumPageBlobTier')
    if not hasattr(tier_model, tier):
        from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
        raise ValueError('Unknown premium page blob tier name. Choose among {}'.format(', '.join(
            get_blob_tier_names(cmd.cli_ctx, 'PremiumPageBlobTier'))))
    namespace.tier = getattr(tier_model, tier)
def block_blob_tier_validator(cmd, namespace):
    """Translate the --tier argument into a StandardBlobTier enum member."""
    tier = namespace.tier
    if not tier:
        return
    # Standard tiers apply to block blobs only.
    if namespace.blob_type != 'block':
        raise ValueError('Blob tier is only applicable to block blobs on standard storage accounts.')
    tier_model = cmd.get_models('blob.models#StandardBlobTier')
    if not hasattr(tier_model, tier):
        from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
        raise ValueError('Unknown block blob tier name. Choose among {}'.format(', '.join(
            get_blob_tier_names(cmd.cli_ctx, 'StandardBlobTier'))))
    namespace.tier = getattr(tier_model, tier)
def blob_tier_validator(cmd, namespace):
    """Dispatch tier validation to the page- or block-blob specific validator."""
    blob_type = namespace.blob_type
    if blob_type == 'page':
        page_blob_tier_validator(cmd, namespace)
        return
    if blob_type == 'block':
        block_blob_tier_validator(cmd, namespace)
        return
    raise ValueError('Blob tier is only applicable to block or page blob.')
|
en
| 0.761878
|
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=protected-access # Utilities # pylint: disable=inconsistent-return-statements,too-many-lines Query the storage account key. This is used when the customer doesn't offer account key but name. #StorageAccountKeys') # of type: models.storage_account_list_keys_result#StorageAccountListKeysResult # pylint: disable=no-member Query the storage account's resource group, which the mgmt sdk requires. #TokenCredential') # region PARAMETER VALIDATORS Processes the resource group parameter from the account name Retrieves storage connection parameters from environment variables and parses out connection string into account name and key # give warning if there are account key args being ignored # if connection string supplied or in environment variables, extract account key and name # otherwise, simply try to retrieve the remaining variables from environment variables # strip the '?' from sas token. the portal and command line are returns sas token in different # forms # if account name is specified but no key, attempt to query Validate the parameters referenced to a blob source and create the source URI from them. # source as blob # source credential clues # source in the form of an uri # simplest scenario--no further processing necessary # must run first to resolve storage account # determine if the copy will happen in the same storage account # neither source account name or key is given, assume that user intends to copy blob in # the same account # the source account name is same as the destination account name # the source account is different from destination account but the key is missing # try to query one. 
# else: both source account name and key are given by user # pylint: disable=too-many-statements # source as blob # source as file # source credential clues # source in the form of an uri # ensure either a file or blob source is specified # must run first to resolve storage account # assume that user intends to copy blob in the same account # determine if the copy will happen in the same storage account # the source account is different from destination account but the key is missing try to query one. # Both source account name and either key or sas (or both) are now available # generate a sas token even in the same account when the source and destination are not the same kind. #BaseBlobService', #FileService', #ContentSettings', #ContentSettings') # must run certain validators first for an update # retrieve the existing object properties for an update # create new properties # if update, fill in any None values with existing Builds up the encryption services object for storage account operations based on the list of services passed in. Converts a list of key value pairs into a dictionary. Ensures that required RowKey and PartitionKey are converted to the correct case and included. Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they can be queried correctly. # ensure numbers are converted from strings so querying will work correctly Converts a list of key value pairs into a dictionary. Ensures that required nextrowkey and nextpartitionkey are included. Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'. Allows another path-type parameter to be named which can supply a default filename. 
#Include') A special case for table because the SDK associates the QUERY permission with 'r' #BaseBlobService') # must retrieve the existing ACL to simulate a patch operation because these calls # are needlessly conflated # pylint: disable=too-many-statements Create the second file service or blob service client for batch copy command, which is used to list the source files or blobs. If both the source account and source URI are omitted, it indicates that user want to copy files or blobs in the same storage account, therefore the destination client will be set None hence the command will use destination client. #FileService', 'blob.blockblobservice#BlockBlobService') # must be container or share # Set the source_client to None if neither source_account or source_uri is given. This # indicates the command that the source files share or blob container is in the same storage # account as the destination file share or blob container. # # The command itself should create the source service client since the validator can't # access the destination client through the namespace. # # A few arguments check will be made as well so as not to cause ambiguity. # not using oauth # oauth is only possible through destination, must still get source creds # when neither storage account key or SAS is given, try to fetch the key in the current # subscription Process the parameters for storage container delete command # check whether to use mgmt or data-plane # use management-plane # use data-plane, like before Process the parameters for storage blob download command # 1. quick check # 2. try to extract account name and container name from source string # 3. Call validators Process the source and destination of storage blob upload command # 1. quick check # 2. try to extract account name and container name from destination string # 3. collect the files to be uploaded # 4. 
determine blob type # when all the listed files are vhd files use page # source files contain vhd files but not all of them Fail to guess the required blob type. Type of the files to be uploaded are not consistent. Default blob type for .vhd files is "page", while others are "block". You can solve this problem by either explicitly set the blob type or ensure the pattern matches a correct set of files. # 5. call other validators #ContentSettings') Process the container parameters for storage blob batch commands before populating args from environment. # destination # try to extract account name and container name from source string # either a url or name # if no sas-token is given and the container url contains one, use it # Finally, grab missing storage connection parameters from environment variables Process the parameters of storage file batch upload command # 1. quick check # 2. try to extract account name and container name from destination string Process the parameters for storage file batch download command # 1. quick check # 2. try to extract account name and share name from source string Validates UTC datetime. Examples of accepted forms: 2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 Validates UTC datetime. Examples of accepted forms: 2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 Validates an IPv4 address or address range. Returns a function which validates that resource types string contains only a combination of service, container, and object. Their shorthand representations are s, c, and o. #ResourceTypes') Returns a function which validates that services string contains only a combination of blob, queue, table, and file. Their shorthand representations are b, q, t, and f. #Services') #PremiumPageBlobTier'), namespace.tier) #StandardBlobTier'), namespace.tier)
| 1.916876
| 2
|
src/balancing_platform/video_processing.py
|
amalieKo/Balancing-Platform
| 0
|
6625922
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Object tracking based on a HSV-mask,
and will contour out the biggest object
and find its coordinates in the x and y plane.
Code by: <NAME>, Dated: 05.10-2018
Contact: <EMAIL>
Website: https://github.com/magnusoy/Balancing-Platform
"""
# Importing packages
import cv2
import numpy as np
class ObjectDetection(object):
    """Finds biggest object according to HSV filtering.
    Returns the coordinates in x and y plane."""

    def __init__(self, capture, watch):
        """Store the video capture handle and the debug-display flag.

        :param capture: an opened cv2.VideoCapture
        :param watch: when True, show debug windows with frame and mask
        """
        self.DEBUG = watch
        self.cap = capture

    def getCoordinates(self):
        """Finds the biggest object.
        Return: X and Y coordinates from center of the object,
        or (0, 0) when no usable object is detected."""
        _, frame = self.cap.read()
        # Convert RGB to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # HSV limits found with morphological_transformation.py
        lower_color = np.array([29, 125, 85])
        upper_color = np.array([39, 181, 182])
        # Creates a mask
        mask = cv2.inRange(hsv, lower_color, upper_color)
        # Enlarge the mask
        kernel = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(mask, kernel)
        # OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4 returns
        # (contours, hierarchy); indexing from the end works with both.
        contours = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if contours:
            ball = max(contours, key=cv2.contourArea)
            M = cv2.moments(ball)
            # Guard against a zero-area contour, which would otherwise cause a
            # ZeroDivisionError when computing the centroid.
            if M["m00"] != 0:
                cv2.drawContours(frame, ball, -1, (0, 255, 0), 3)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                if self.DEBUG:
                    cv2.circle(frame, center, 3, (0, 0, 255), -1)
                    cv2.putText(frame, "centroid", (center[0] + 10, center[1]),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
                    cv2.putText(frame, "(" + str(center[0]) + "," + str(center[1]) + ")",
                                (center[0] + 10, center[1] + 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
                    self.watch(frame, dilation)
                return center
        # No contour (or a degenerate one) -- report the origin.
        if self.DEBUG:
            self.watch(frame, dilation)
        return 0, 0

    @staticmethod
    def watch(frame, dilation):
        """Works as a debug functionality if user
        wants to see the frame and mask."""
        cv2.imshow("Frame", frame)
        cv2.imshow("Mask", dilation)

    def stop(self):
        """Releases the capture and close all frames running.
        Return: True when everything is closed."""
        self.cap.release()
        cv2.destroyAllWindows()
        return True
# Simple example of usage.
if __name__ == '__main__':
    cap = cv2.VideoCapture(1)
    # propId 3/4 correspond to CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    cap.set(propId=3, value=640)
    cap.set(propId=4, value=480)
    objectDetection = ObjectDetection(cap, watch=True)
    while True:
        coordinates = objectDetection.getCoordinates()
        print(coordinates)
        # Break loop with ESC-key (keycode 27)
        key = cv2.waitKey(5) & 0xFF
        if key == 27:
            running = objectDetection.stop()
            break
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Object tracking based on a HSV-mask,
and will contour out the biggest object
and find its coordinates in the x and y plane.
Code by: <NAME>, Dated: 05.10-2018
Contact: <EMAIL>
Website: https://github.com/magnusoy/Balancing-Platform
"""
# Importing packages
import cv2
import numpy as np
class ObjectDetection(object):
    """Finds biggest object according to HSV filtering.
    Returns the coordinates in x and y plane."""
    def __init__(self, capture, watch):
        # capture: an opened cv2.VideoCapture; watch: enable debug windows.
        self.DEBUG = watch
        self.cap = capture
    def getCoordinates(self):
        """Finds the biggest object.
        Return: X and Y coordinates from center of the object"""
        _, frame = self.cap.read()
        # Convert RGB to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Defining the color in HSV
        # Find limits using morphological_transformation.py
        lower_color = np.array([29, 125, 85])
        upper_color = np.array([39, 181, 182])
        # Creates a mask
        mask = cv2.inRange(hsv, lower_color, upper_color)
        # Enlarge the mask
        kernel = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(mask, kernel)
        # Finding the contours
        # NOTE(review): this 3-value unpack assumes OpenCV 3; OpenCV 4's
        # findContours returns only (contours, hierarchy) -- confirm the
        # pinned cv2 version.
        im2, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Mark up only the largest contour and draw centroid
        if len(contours) > 0:
            ball = max(contours, key=cv2.contourArea)
            cv2.drawContours(frame, ball, -1, (0, 255, 0), 3)
            M = cv2.moments(ball)
            # NOTE(review): M["m00"] can be 0 for a degenerate contour, which
            # would raise ZeroDivisionError here -- verify upstream filtering.
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            if self.DEBUG:
                cv2.circle(frame, center, 3, (0, 0, 255), -1)
                cv2.putText(frame, "centroid", (center[0] + 10, center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255),
                            1)
                cv2.putText(frame, "(" + str(center[0]) + "," + str(center[1]) + ")", (center[0] + 10, center[1] + 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
                self.watch(frame, dilation)
            return center
        else:
            if self.DEBUG:
                self.watch(frame, dilation)
            return 0, 0
    @staticmethod
    def watch(frame, dilation):
        """Works as a debug functionality if user
        wants to see the frame and mask."""
        cv2.imshow("Frame", frame)
        cv2.imshow("Mask", dilation)
    def stop(self):
        """Releases the capture and close all frames running.
        Return: True when everything is closed."""
        self.cap.release()
        cv2.destroyAllWindows()
        return True
# Simple example of usage.
if __name__ == '__main__':
    cap = cv2.VideoCapture(1)
    # propId 3/4 correspond to CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    cap.set(propId=3, value=640)
    cap.set(propId=4, value=480)
    objectDetection = ObjectDetection(cap, watch=True)
    while True:
        coordinates = objectDetection.getCoordinates()
        print(coordinates)
        # Break loop with ESC-key (keycode 27)
        key = cv2.waitKey(5) & 0xFF
        if key == 27:
            running = objectDetection.stop()
            break
|
en
| 0.792214
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Object tracking based on a HSV-mask, and will contour out the biggest object and find its coordinates in the x and y plane. Code by: <NAME>, Dated: 05.10-2018 Contact: <EMAIL> Website: https://github.com/magnusoy/Balancing-Platform # Importing packages Finds biggest object according to HSV filtering. Returns the coordinates in x and y plane. Finds the biggest object. Return: X and Y coordinates from center of the object # Convert RGB to HSV # Defining the color in HSV # Find limits using morphological_transformation.py # Creates a mask # Enlarge the mask # Finding the contours # Mark up only the largest contour and draw centroid Works as a debug functionality if user wants to see the frame and mask. Releases the capture and close all frames running. Return: True when everything is closed. # Simple example of usage. # Break loop with ESC-key
| 3.183322
| 3
|
venv/lib/python3.6/site-packages/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py
|
usegalaxy-no/usegalaxy
| 1
|
6625923
|
<filename>venv/lib/python3.6/site-packages/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
module: aws_waf_web_acl
short_description: Create and delete WAF Web ACLs.
version_added: 1.0.0
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/).
author:
- <NAME> (@mmochan)
- <NAME> (@willthames)
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
options:
name:
description: Name of the Web Application Firewall ACL to manage.
required: yes
type: str
default_action:
description: The action that you want AWS WAF to take when a request doesn't
match the criteria specified in any of the Rule objects that are associated with the WebACL.
choices:
- block
- allow
- count
type: str
state:
description: Whether the Web ACL should be present or absent.
choices:
- present
- absent
default: present
type: str
metric_name:
description:
- A friendly name or description for the metrics for this WebACL.
- The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- You can't change I(metric_name) after you create the WebACL.
- Metric name will default to I(name) with disallowed characters stripped out.
type: str
rules:
description:
- A list of rules that the Web ACL will enforce.
type: list
elements: dict
suboptions:
name:
description: Name of the rule.
type: str
required: true
action:
description: The action to perform.
type: str
required: true
priority:
description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first.
type: int
required: true
type:
description: The type of rule.
choices:
- rate_based
- regular
type: str
purge_rules:
description:
- Whether to remove rules that aren't passed with I(rules).
default: False
type: bool
waf_regional:
description: Whether to use waf-regional module.
default: false
required: no
type: bool
'''
EXAMPLES = r'''
- name: create web ACL
community.aws.aws_waf_web_acl:
name: my_web_acl
rules:
- name: my_rule
priority: 1
action: block
default_action: block
purge_rules: yes
state: present
- name: delete the web acl
community.aws.aws_waf_web_acl:
name: my_web_acl
state: absent
'''
RETURN = r'''
web_acl:
description: contents of the Web ACL.
returned: always
type: complex
contains:
default_action:
description: Default action taken by the Web ACL if no rules match.
returned: always
type: dict
sample:
type: BLOCK
metric_name:
description: Metric name used as an identifier.
returned: always
type: str
sample: mywebacl
name:
description: Friendly name of the Web ACL.
returned: always
type: str
sample: my web acl
rules:
description: List of rules.
returned: always
type: complex
contains:
action:
description: Action taken by the WAF when the rule matches.
returned: always
type: complex
sample:
type: ALLOW
priority:
description: priority number of the rule (lower numbers are run first).
returned: always
type: int
sample: 2
rule_id:
description: Rule ID.
returned: always
type: str
sample: a6fc7ab5-287b-479f-8004-7fd0399daf75
type:
description: Type of rule (either REGULAR or RATE_BASED).
returned: always
type: str
sample: REGULAR
web_acl_id:
description: Unique identifier of Web ACL.
returned: always
type: str
sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
import re
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.waf import (
list_regional_rules_with_backoff,
list_regional_web_acls_with_backoff,
list_rules_with_backoff,
list_web_acls_with_backoff,
run_func_with_change_token_backoff,
)
def get_web_acl_by_name(client, module, name):
    """Return the WebACLId whose Name matches *name*, or an empty (falsy)
    list when no Web ACL has that name."""
    matches = [acl['WebACLId'] for acl in list_web_acls(client, module) if acl['Name'] == name]
    return matches[0] if matches else matches
def create_rule_lookup(client, module):
    """Build a mapping of rule name -> rule dict for either the global ('WAF')
    or regional ('WAFRegional') client."""
    kind = client.__class__.__name__
    if kind == 'WAF':
        try:
            return {rule['Name']: rule for rule in list_rules_with_backoff(client)}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list rules')
    elif kind == 'WAFRegional':
        try:
            return {rule['Name']: rule for rule in list_regional_rules_with_backoff(client)}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not list regional rules')
def get_web_acl(client, module, web_acl_id):
    """Fetch a single Web ACL by id, failing the module on AWS errors."""
    try:
        response = client.get_web_acl(WebACLId=web_acl_id)
        return response['WebACL']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
def list_web_acls(client, module):
    """List every Web ACL for either the global ('WAF') or regional
    ('WAFRegional') client, failing the module on AWS errors."""
    kind = client.__class__.__name__
    try:
        if kind == 'WAF':
            return list_web_acls_with_backoff(client)
        if kind == 'WAFRegional':
            return list_regional_web_acls_with_backoff(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not get Web ACLs')
def find_and_update_web_acl(client, module, web_acl_id):
    """Reconcile the Web ACL's attached rules with module.params['rules'].

    Computes insertions (desired rules not yet attached) and, when
    purge_rules is set, deletions (attached rules no longer desired), applies
    them via update_web_acl, and waits for every change token to sync.

    :returns: tuple (changed, acl_dict) with the re-read ACL when changed
    """
    acl = get_web_acl(client, module, web_acl_id)
    rule_lookup = create_rule_lookup(client, module)
    existing_rules = acl['Rules']
    # Desired rules in the API's ActivatedRule shape; rule names are resolved
    # to RuleIds via the lookup table.
    desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
                      'Priority': rule['priority'],
                      'Action': {'Type': rule['action'].upper()},
                      'Type': rule.get('type', 'regular').upper()}
                     for rule in module.params['rules']]
    missing = [rule for rule in desired_rules if rule not in existing_rules]
    extras = []
    if module.params['purge_rules']:
        extras = [rule for rule in existing_rules if rule not in desired_rules]
    insertions = [format_for_update(rule, 'INSERT') for rule in missing]
    deletions = [format_for_update(rule, 'DELETE') for rule in extras]
    changed = bool(insertions + deletions)
    # Purge rules before adding new ones in case a deletion shares the same
    # priority as an insertion.
    params = {
        'WebACLId': acl['WebACLId'],
        'DefaultAction': acl['DefaultAction']
    }
    change_tokens = []
    if deletions:
        try:
            params['Updates'] = deletions
            result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
            change_tokens.append(result['ChangeToken'])
            # Wait for the deletion to propagate before issuing insertions.
            get_waiter(
                client, 'change_token_in_sync',
            ).wait(
                ChangeToken=result['ChangeToken']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update Web ACL')
    if insertions:
        try:
            params['Updates'] = insertions
            result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
            change_tokens.append(result['ChangeToken'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update Web ACL')
    if change_tokens:
        # Ensure every issued change has fully propagated before re-reading.
        for token in change_tokens:
            get_waiter(
                client, 'change_token_in_sync',
            ).wait(
                ChangeToken=token
            )
    if changed:
        acl = get_web_acl(client, module, web_acl_id)
    return changed, acl
def format_for_update(rule, action):
    """Wrap an activated rule into the Updates entry shape expected by
    update_web_acl, tagged with *action* ('INSERT' or 'DELETE')."""
    activated = {
        'Priority': rule['Priority'],
        'RuleId': rule['RuleId'],
        'Action': {'Type': rule['Action']['Type']},
    }
    return {'Action': action, 'ActivatedRule': activated}
def remove_rules_from_web_acl(client, module, web_acl_id):
    """Detach every rule from the given Web ACL (required before deletion)."""
    acl = get_web_acl(client, module, web_acl_id)
    updates = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
    params = {
        'WebACLId': acl['WebACLId'],
        'DefaultAction': acl['DefaultAction'],
        'Updates': updates,
    }
    try:
        run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Could not remove rule')
def ensure_web_acl_present(client, module):
    """Create the Web ACL if it does not exist, then reconcile its rules.

    :returns: tuple (changed, acl_dict)
    """
    changed = False
    result = None
    name = module.params['name']
    web_acl_id = get_web_acl_by_name(client, module, name)
    if web_acl_id:
        (changed, result) = find_and_update_web_acl(client, module, web_acl_id)
    else:
        metric_name = module.params['metric_name']
        if not metric_name:
            # Metric names must be purely alphanumeric; derive one from the ACL name.
            metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
        default_action = module.params['default_action'].upper()
        try:
            params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
            new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not create Web ACL')
        # A freshly-created ACL still needs its rules attached.
        (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
    return changed, result
def ensure_web_acl_absent(client, module):
    """Delete the named Web ACL (detaching its rules first) if it exists.

    Returns (changed, {}) — changed is True only when an ACL was deleted.
    """
    acl_id = get_web_acl_by_name(client, module, module.params['name'])
    if not acl_id:
        return False, {}
    acl = get_web_acl(client, module, acl_id)
    if acl['Rules']:
        # An ACL with attached rules cannot be deleted directly.
        remove_rules_from_web_acl(client, module, acl_id)
    try:
        run_func_with_change_token_backoff(
            client, module, {'WebACLId': acl_id}, client.delete_web_acl, wait=True)
        return True, {}
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg='Could not delete Web ACL')
    return False, {}
def main():
    """Ansible entry point: create/update or delete a (regional) WAF Web ACL."""
    argument_spec = {
        'name': {'required': True},
        'default_action': {'choices': ['block', 'allow', 'count']},
        'metric_name': {},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'rules': {'type': 'list', 'elements': 'dict'},
        'purge_rules': {'type': 'bool', 'default': False},
        'waf_regional': {'type': 'bool', 'default': False},
    }
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['default_action', 'rules']]])
    # Regional WAF uses a different service endpoint than global WAF.
    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    client = module.client(resource)
    if module.params.get('state') == 'present':
        changed, results = ensure_web_acl_present(client, module)
    else:
        changed, results = ensure_web_acl_absent(client, module)
    module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()
|
<filename>venv/lib/python3.6/site-packages/ansible_collections/community/aws/plugins/modules/aws_waf_web_acl.py
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
module: aws_waf_web_acl
short_description: Create and delete WAF Web ACLs.
version_added: 1.0.0
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/).
author:
- <NAME> (@mmochan)
- <NAME> (@willthames)
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
options:
name:
description: Name of the Web Application Firewall ACL to manage.
required: yes
type: str
default_action:
description: The action that you want AWS WAF to take when a request doesn't
match the criteria specified in any of the Rule objects that are associated with the WebACL.
choices:
- block
- allow
- count
type: str
state:
description: Whether the Web ACL should be present or absent.
choices:
- present
- absent
default: present
type: str
metric_name:
description:
- A friendly name or description for the metrics for this WebACL.
- The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- You can't change I(metric_name) after you create the WebACL.
- Metric name will default to I(name) with disallowed characters stripped out.
type: str
rules:
description:
- A list of rules that the Web ACL will enforce.
type: list
elements: dict
suboptions:
name:
description: Name of the rule.
type: str
required: true
action:
description: The action to perform.
type: str
required: true
priority:
description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first.
type: int
required: true
type:
description: The type of rule.
choices:
- rate_based
- regular
type: str
purge_rules:
description:
- Whether to remove rules that aren't passed with I(rules).
default: False
type: bool
waf_regional:
description: Whether to use waf-regional module.
default: false
required: no
type: bool
'''
EXAMPLES = r'''
- name: create web ACL
community.aws.aws_waf_web_acl:
name: my_web_acl
rules:
- name: my_rule
priority: 1
action: block
default_action: block
purge_rules: yes
state: present
- name: delete the web acl
community.aws.aws_waf_web_acl:
name: my_web_acl
state: absent
'''
RETURN = r'''
web_acl:
description: contents of the Web ACL.
returned: always
type: complex
contains:
default_action:
description: Default action taken by the Web ACL if no rules match.
returned: always
type: dict
sample:
type: BLOCK
metric_name:
description: Metric name used as an identifier.
returned: always
type: str
sample: mywebacl
name:
description: Friendly name of the Web ACL.
returned: always
type: str
sample: my web acl
rules:
description: List of rules.
returned: always
type: complex
contains:
action:
description: Action taken by the WAF when the rule matches.
returned: always
type: complex
sample:
type: ALLOW
priority:
description: priority number of the rule (lower numbers are run first).
returned: always
type: int
sample: 2
rule_id:
description: Rule ID.
returned: always
type: str
sample: a6fc7ab5-287b-479f-8004-7fd0399daf75
type:
description: Type of rule (either REGULAR or RATE_BASED).
returned: always
type: str
sample: REGULAR
web_acl_id:
description: Unique identifier of Web ACL.
returned: always
type: str
sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
import re
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.waf import (
list_regional_rules_with_backoff,
list_regional_web_acls_with_backoff,
list_rules_with_backoff,
list_web_acls_with_backoff,
run_func_with_change_token_backoff,
)
def get_web_acl_by_name(client, module, name):
    """Return the WebACLId of the ACL named *name*.

    Preserves the historical contract: the first matching id, or an empty
    (falsy) list when no ACL matches.
    """
    matches = [acl['WebACLId'] for acl in list_web_acls(client, module) if acl['Name'] == name]
    return matches[0] if matches else matches
def create_rule_lookup(client, module):
    """Build a rule-name -> rule mapping for all existing WAF rules.

    Dispatches on the boto3 client class (global WAF vs WAF Regional).
    """
    kind = client.__class__.__name__
    if kind == 'WAF':
        try:
            return {rule['Name']: rule for rule in list_rules_with_backoff(client)}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
            module.fail_json_aws(err, msg='Could not list rules')
    elif kind == 'WAFRegional':
        try:
            return {rule['Name']: rule for rule in list_regional_rules_with_backoff(client)}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
            module.fail_json_aws(err, msg='Could not list regional rules')
def get_web_acl(client, module, web_acl_id):
    """Fetch a single Web ACL by id, failing the module on AWS API errors."""
    try:
        return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg='Could not get Web ACL with id %s' % web_acl_id)
def list_web_acls(client, module):
    """List all Web ACLs for either the global or the regional WAF client."""
    listers = {
        'WAF': list_web_acls_with_backoff,
        'WAFRegional': list_regional_web_acls_with_backoff,
    }
    lister = listers.get(client.__class__.__name__)
    if lister is None:
        # Unknown client class: mirror the original implicit-None behaviour.
        return None
    try:
        return lister(client)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg='Could not get Web ACLs')
def find_and_update_web_acl(client, module, web_acl_id):
    """Reconcile the Web ACL's attached rules with module.params['rules'].

    Computes the set of rule attachments to INSERT (desired but absent) and,
    when purge_rules is set, to DELETE (present but undesired), applies them
    via UpdateWebACL, and waits for the change tokens to sync.

    Returns a (changed, acl_dict) tuple; acl_dict is re-fetched when changed.
    """
    acl = get_web_acl(client, module, web_acl_id)
    rule_lookup = create_rule_lookup(client, module)
    existing_rules = acl['Rules']
    # Translate module params into the AWS ActivatedRule shape so that a plain
    # membership test against existing_rules detects differences.
    desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
                      'Priority': rule['priority'],
                      'Action': {'Type': rule['action'].upper()},
                      'Type': rule.get('type', 'regular').upper()}
                     for rule in module.params['rules']]
    missing = [rule for rule in desired_rules if rule not in existing_rules]
    extras = []
    if module.params['purge_rules']:
        extras = [rule for rule in existing_rules if rule not in desired_rules]
    insertions = [format_for_update(rule, 'INSERT') for rule in missing]
    deletions = [format_for_update(rule, 'DELETE') for rule in extras]
    changed = bool(insertions + deletions)
    # Purge rules before adding new ones in case a deletion shares the same
    # priority as an insertion.
    params = {
        'WebACLId': acl['WebACLId'],
        'DefaultAction': acl['DefaultAction']
    }
    change_tokens = []
    if deletions:
        try:
            params['Updates'] = deletions
            result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
            change_tokens.append(result['ChangeToken'])
            # Wait for the deletion to propagate before inserting, so a freed
            # priority can be reused by an insertion immediately afterwards.
            get_waiter(
                client, 'change_token_in_sync',
            ).wait(
                ChangeToken=result['ChangeToken']
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update Web ACL')
    if insertions:
        try:
            params['Updates'] = insertions
            result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
            change_tokens.append(result['ChangeToken'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg='Could not update Web ACL')
    if change_tokens:
        # NOTE(review): the deletion token was already awaited above, so it is
        # waited on a second time here — redundant but harmless.
        for token in change_tokens:
            get_waiter(
                client, 'change_token_in_sync',
            ).wait(
                ChangeToken=token
            )
    if changed:
        # Re-fetch so callers see the post-update rule set.
        acl = get_web_acl(client, module, web_acl_id)
    return changed, acl
def format_for_update(rule, action):
    """Build one WAF UpdateWebACL 'Updates' entry for *rule* with *action* (INSERT/DELETE)."""
    activated_rule = {
        'Priority': rule['Priority'],
        'RuleId': rule['RuleId'],
        'Action': {'Type': rule['Action']['Type']},
    }
    return {'Action': action, 'ActivatedRule': activated_rule}
def remove_rules_from_web_acl(client, module, web_acl_id):
    """Detach every rule currently attached to the given Web ACL."""
    acl = get_web_acl(client, module, web_acl_id)
    updates = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
    try:
        params = {
            'WebACLId': acl['WebACLId'],
            'DefaultAction': acl['DefaultAction'],
            'Updates': updates,
        }
        run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg='Could not remove rule')
def ensure_web_acl_present(client, module):
    """Create the Web ACL if missing, then reconcile its rules.

    Returns a (changed, web_acl_dict) tuple.
    """
    name = module.params['name']
    acl_id = get_web_acl_by_name(client, module, name)
    if acl_id:
        return find_and_update_web_acl(client, module, acl_id)
    # Metric name defaults to the ACL name with disallowed characters stripped.
    metric_name = module.params['metric_name'] or re.sub(r'[^A-Za-z0-9]', '', name)
    action_type = module.params['default_action'].upper()
    try:
        params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': action_type}}
        created = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg='Could not create Web ACL')
    return find_and_update_web_acl(client, module, created['WebACL']['WebACLId'])
def ensure_web_acl_absent(client, module):
    """Delete the named Web ACL (detaching its rules first) if it exists.

    Returns (changed, {}) — changed is True only when an ACL was deleted.
    """
    acl_id = get_web_acl_by_name(client, module, module.params['name'])
    if not acl_id:
        return False, {}
    acl = get_web_acl(client, module, acl_id)
    if acl['Rules']:
        # An ACL with attached rules cannot be deleted directly.
        remove_rules_from_web_acl(client, module, acl_id)
    try:
        run_func_with_change_token_backoff(
            client, module, {'WebACLId': acl_id}, client.delete_web_acl, wait=True)
        return True, {}
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err:
        module.fail_json_aws(err, msg='Could not delete Web ACL')
    return False, {}
def main():
    """Ansible entry point: create/update or delete a (regional) WAF Web ACL."""
    argument_spec = {
        'name': {'required': True},
        'default_action': {'choices': ['block', 'allow', 'count']},
        'metric_name': {},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'rules': {'type': 'list', 'elements': 'dict'},
        'purge_rules': {'type': 'bool', 'default': False},
        'waf_regional': {'type': 'bool', 'default': False},
    }
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['default_action', 'rules']]])
    # Regional WAF uses a different service endpoint than global WAF.
    resource = 'waf-regional' if module.params['waf_regional'] else 'waf'
    client = module.client(resource)
    if module.params.get('state') == 'present':
        changed, results = ensure_web_acl_present(client, module)
    else:
        changed, results = ensure_web_acl_absent(client, module)
    module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))


if __name__ == '__main__':
    main()
|
en
| 0.742233
|
#!/usr/bin/python # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) module: aws_waf_web_acl short_description: Create and delete WAF Web ACLs. version_added: 1.0.0 description: - Read the AWS documentation for WAF U(https://aws.amazon.com/documentation/waf/). author: - <NAME> (@mmochan) - <NAME> (@willthames) extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 options: name: description: Name of the Web Application Firewall ACL to manage. required: yes type: str default_action: description: The action that you want AWS WAF to take when a request doesn't match the criteria specified in any of the Rule objects that are associated with the WebACL. choices: - block - allow - count type: str state: description: Whether the Web ACL should be present or absent. choices: - present - absent default: present type: str metric_name: description: - A friendly name or description for the metrics for this WebACL. - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace. - You can't change I(metric_name) after you create the WebACL. - Metric name will default to I(name) with disallowed characters stripped out. type: str rules: description: - A list of rules that the Web ACL will enforce. type: list elements: dict suboptions: name: description: Name of the rule. type: str required: true action: description: The action to perform. type: str required: true priority: description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first. type: int required: true type: description: The type of rule. choices: - rate_based - regular type: str purge_rules: description: - Whether to remove rules that aren't passed with I(rules). default: False type: bool waf_regional: description: Whether to use waf-regional module. 
default: false required: no type: bool - name: create web ACL community.aws.aws_waf_web_acl: name: my_web_acl rules: - name: my_rule priority: 1 action: block default_action: block purge_rules: yes state: present - name: delete the web acl community.aws.aws_waf_web_acl: name: my_web_acl state: absent web_acl: description: contents of the Web ACL. returned: always type: complex contains: default_action: description: Default action taken by the Web ACL if no rules match. returned: always type: dict sample: type: BLOCK metric_name: description: Metric name used as an identifier. returned: always type: str sample: mywebacl name: description: Friendly name of the Web ACL. returned: always type: str sample: my web acl rules: description: List of rules. returned: always type: complex contains: action: description: Action taken by the WAF when the rule matches. returned: always type: complex sample: type: ALLOW priority: description: priority number of the rule (lower numbers are run first). returned: always type: int sample: 2 rule_id: description: Rule ID. returned: always type: str sample: a6fc7ab5-287b-479f-8004-7fd0399daf75 type: description: Type of rule (either REGULAR or RATE_BASED). returned: always type: str sample: REGULAR web_acl_id: description: Unique identifier of Web ACL. returned: always type: str sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c # handled by AnsibleAWSModule # Purge rules before adding new ones in case a deletion shares the same # priority as an insertion.
| 1.810016
| 2
|
xetra/common/constants.py
|
Kenebehi/xetra-production-etl-pipeline
| 0
|
6625924
|
<reponame>Kenebehi/xetra-production-etl-pipeline
"""
File to store constants
"""
from enum import Enum
class S3FileTypes(Enum):
    """
    File formats supported by S3BucketConnector.
    """
    CSV = 'csv'  # comma-separated values
    PARQUET = 'parquet'  # Apache Parquet columnar format
class MetaProcessFormat(Enum):
    """
    Date formats, column names and file format used by the MetaProcess class.
    """
    META_DATE_FORMAT = '%Y-%m-%d'  # date-only format for meta file entries
    META_PROCESS_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'  # processing-timestamp format
    META_SOURCE_DATE_COL = 'source_date'  # meta file column: extraction source date
    META_PROCESS_COL = 'datetime_of_processing'  # meta file column: processing time
    META_FILE_FORMAT = 'csv'  # storage format of the meta file itself
|
"""
File to store constants
"""
from enum import Enum
class S3FileTypes(Enum):
    """
    File formats supported by S3BucketConnector.
    """
    CSV = 'csv'  # comma-separated values
    PARQUET = 'parquet'  # Apache Parquet columnar format
class MetaProcessFormat(Enum):
    """
    Date formats, column names and file format used by the MetaProcess class.
    """
    META_DATE_FORMAT = '%Y-%m-%d'  # date-only format for meta file entries
    META_PROCESS_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'  # processing-timestamp format
    META_SOURCE_DATE_COL = 'source_date'  # meta file column: extraction source date
    META_PROCESS_COL = 'datetime_of_processing'  # meta file column: processing time
    META_FILE_FORMAT = 'csv'  # storage format of the meta file itself
|
en
| 0.796934
|
File to store constants supported file types for S3BucketConnector formation for MetaProcess class
| 2.595341
| 3
|
tests/extensions/jwt/test_jwt.py
|
ssfdust/full-stack-flask-smorest
| 33
|
6625925
|
<reponame>ssfdust/full-stack-flask-smorest
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import jsonify
class TestJwt:
    """Integration tests for the JWT extension: token storage, expiry, revocation."""

    def test_jwt_manager(self, app, db, monkeypatch, jwt):
        """A stored token authenticates; removing it from the blacklist table rejects it."""
        from app.extensions.jwt.uitls import add_token_to_database
        from flask_jwt_extended import create_access_token, jwt_required, current_user
        from app.extensions.jwt.models import TokenBlackList
        token = create_access_token("test")
        add_token_to_database(token, "identity")

        # Register the protected endpoint (the other tests below reuse it).
        @app.route("/protected", methods=["GET"])
        @jwt_required
        def protected():
            assert current_user == {"user": "test"}
            return jsonify({"code": 1})

        test_client = app.test_client()
        headers = {"Authorization": "Bearer {}".format(token)}
        resp = test_client.get("/protected")
        assert resp.status_code == 401
        resp = test_client.get("/protected", headers=headers)
        # Bug fix: this comparison was missing `assert` and was a no-op.
        assert resp.status_code == 200
        TokenBlackList.query.delete()
        db.session.commit()
        resp = test_client.get("/protected", headers=headers)
        # Bug fix: missing `assert` here as well.
        assert resp.status_code == 401

    def test_jwt_expired(self, app, db, jwt):
        """An already-expired token is rejected with the app's 'expired' status code."""
        from app.extensions.jwt.models import TokenBlackList
        from app.extensions.jwt.uitls import add_token_to_database
        from flask_jwt_extended import create_access_token
        from datetime import timedelta
        token = create_access_token("test", expires_delta=-timedelta(seconds=1))
        add_token_to_database(token, "identity", allow_expired=True)
        headers = {"Authorization": "Bearer {}".format(token)}
        test_client = app.test_client()
        resp = test_client.get("/protected", headers=headers)
        assert resp.status_code == 402
        TokenBlackList.query.delete()
        db.session.commit()

    def test_jwt_revoke(self, app, db, jwt):
        """A revoked token is rejected even though it is otherwise still valid."""
        from app.extensions.jwt.uitls import add_token_to_database, revoke_token
        from flask_jwt_extended import create_access_token, decode_token
        from app.extensions.jwt.models import TokenBlackList
        token = create_access_token("test",)
        add_token_to_database(token, "identity", allow_expired=True)
        _jwt = decode_token(token)
        revoke_token(_jwt)
        headers = {"Authorization": "Bearer {}".format(token)}
        test_client = app.test_client()
        resp = test_client.get("/protected", headers=headers)
        assert resp.status_code == 401
        TokenBlackList.query.delete()
        db.session.commit()
        # NOTE(review): revoking again after the table was cleared looks like
        # leftover cleanup — confirm it is intentional.
        revoke_token(_jwt)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import jsonify
class TestJwt:
    """Integration tests for the JWT extension: token storage, expiry, revocation."""

    def test_jwt_manager(self, app, db, monkeypatch, jwt):
        """A stored token authenticates; removing it from the blacklist table rejects it."""
        from app.extensions.jwt.uitls import add_token_to_database
        from flask_jwt_extended import create_access_token, jwt_required, current_user
        from app.extensions.jwt.models import TokenBlackList
        token = create_access_token("test")
        add_token_to_database(token, "identity")

        # Register the protected endpoint (the other tests below reuse it).
        @app.route("/protected", methods=["GET"])
        @jwt_required
        def protected():
            assert current_user == {"user": "test"}
            return jsonify({"code": 1})

        test_client = app.test_client()
        headers = {"Authorization": "Bearer {}".format(token)}
        resp = test_client.get("/protected")
        assert resp.status_code == 401
        resp = test_client.get("/protected", headers=headers)
        # Bug fix: this comparison was missing `assert` and was a no-op.
        assert resp.status_code == 200
        TokenBlackList.query.delete()
        db.session.commit()
        resp = test_client.get("/protected", headers=headers)
        # Bug fix: missing `assert` here as well.
        assert resp.status_code == 401

    def test_jwt_expired(self, app, db, jwt):
        """An already-expired token is rejected with the app's 'expired' status code."""
        from app.extensions.jwt.models import TokenBlackList
        from app.extensions.jwt.uitls import add_token_to_database
        from flask_jwt_extended import create_access_token
        from datetime import timedelta
        token = create_access_token("test", expires_delta=-timedelta(seconds=1))
        add_token_to_database(token, "identity", allow_expired=True)
        headers = {"Authorization": "Bearer {}".format(token)}
        test_client = app.test_client()
        resp = test_client.get("/protected", headers=headers)
        assert resp.status_code == 402
        TokenBlackList.query.delete()
        db.session.commit()

    def test_jwt_revoke(self, app, db, jwt):
        """A revoked token is rejected even though it is otherwise still valid."""
        from app.extensions.jwt.uitls import add_token_to_database, revoke_token
        from flask_jwt_extended import create_access_token, decode_token
        from app.extensions.jwt.models import TokenBlackList
        token = create_access_token("test",)
        add_token_to_database(token, "identity", allow_expired=True)
        _jwt = decode_token(token)
        revoke_token(_jwt)
        headers = {"Authorization": "Bearer {}".format(token)}
        test_client = app.test_client()
        resp = test_client.get("/protected", headers=headers)
        assert resp.status_code == 401
        TokenBlackList.query.delete()
        db.session.commit()
        # NOTE(review): revoking again after the table was cleared looks like
        # leftover cleanup — confirm it is intentional.
        revoke_token(_jwt)
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 2.308673
| 2
|
sample_project/appmedia/views.py
|
dokterbob/django-admin-sortable
| 0
|
6625926
|
import os, os.path as osp
from django.conf import settings
from django.views.static import serve as django_serve
from django.views.decorators.cache import cache_page
from django.db.models import get_apps
from django.core.cache import cache
from django.http import Http404, HttpResponse
def serve(request, app, path, show_indexes=True):
    """Serve (GET) or overwrite (POST) a static asset bundled inside a Django app.

    NOTE(review): Python 2 code (`file()`, `print >>f` statements); it will not
    run on Python 3 without porting.
    """
    if request.method == 'GET':
        apps = get_apps()
        for x in apps:
            app_dir = osp.dirname(x.__file__)
            module = x.__name__
            if app == module.split('.')[-2]: # we get the models module here
                if app_dir.endswith("models"):
                    # this can happen only in case when models are a directory
                    app_dir = osp.split(app_dir)[0]
                media_dir = osp.join(app_dir, "media", app)
                if not osp.isdir(media_dir):
                    media_dir = osp.join(app_dir, "media")
                asset = osp.join(media_dir, path)
                if osp.exists(asset):
                    return django_serve(request, path, document_root=media_dir, show_indexes=show_indexes)
                #continue
        # Fallback: no app-bundled asset matched — serve from MEDIA_ROOT.
        return django_serve(request, app+"/"+path, document_root=settings.MEDIA_ROOT, show_indexes=show_indexes)
    elif request.method == 'POST':
        data = request.POST.get("data", "")
        apps = get_apps()
        for x in apps:
            app_dir = osp.dirname(x.__file__)
            module = x.__name__
            if app == module.split('.')[-2]: # we get the models module here
                media_dir = osp.join(app_dir, "media")
                asset = osp.join(media_dir, path)
                if osp.exists(asset):
                    f = file(asset, 'w')
                    # Rewrite the CSS file, reformatting each "selector { rules }" block.
                    for line in data.split('\n'):
                        # NOTE(review): str.strip() returns a new string; the
                        # result is discarded here, so this line is a no-op.
                        line.strip()
                        line = line[:-1]
                        if line:
                            selector, datap = line.split('{')
                            print >>f, selector, '{'
                            # NOTE(review): discarded strip() result again — no-op.
                            datap.strip()
                            lines = datap.split(';')
                            if lines:
                                print >>f, " "+";\n ".join(lines)
                            print >>f, '}\n'
                    f.close()
                    return django_serve(request, path, document_root=media_dir, show_indexes=show_indexes)
                continue
def get_file(path):
    """Resolve a '/media/<app>/<path>' URL to a filesystem path.

    Checks each installed app's bundled 'media' directory first, then falls
    back to MEDIA_ROOT. Python 2 code (print statement below).
    """
    app = path.split('/')[2]
    path = "/".join(path.split('/')[3:])
    apps = get_apps()
    for x in apps:
        app_dir = osp.dirname(x.__file__)
        module = x.__name__
        if app == module.split('.')[-2]: # we get the models module here
            media_dir = osp.join(app_dir, "media")
            asset = osp.join(media_dir, path)
            if osp.exists(asset):
                # NOTE(review): leftover debug print.
                print osp.join(media_dir, path)
                return osp.join(media_dir, path)
    return osp.join(settings.MEDIA_ROOT, app+"/"+path)
@cache_page(24*60*60)
def serve_cached_asset(request, asset):
    """Serve a concatenated JS/CSS bundle whose file list is stored in the cache.

    NOTE(review): assumes cache.get(name) returns a list; on a cache miss
    (None) the join below raises TypeError instead of a clean 404 — confirm
    that is intended. Also assumes *asset* contains exactly one dot.
    """
    name, ext = asset.split('.')
    files = cache.get(name)
    if ext == 'js':
        response = HttpResponse("\n".join([file(get_file(path)).read() for path in files]), mimetype="text/javascript")
        return response
    elif ext == 'css':
        response = HttpResponse("\n".join([file(get_file(path)).read() for path in files]), mimetype="text/css")
        return response
    raise Http404()
|
import os, os.path as osp
from django.conf import settings
from django.views.static import serve as django_serve
from django.views.decorators.cache import cache_page
from django.db.models import get_apps
from django.core.cache import cache
from django.http import Http404, HttpResponse
def serve(request, app, path, show_indexes=True):
    """Serve (GET) or overwrite (POST) a static asset bundled inside a Django app.

    NOTE(review): Python 2 code (`file()`, `print >>f` statements); it will not
    run on Python 3 without porting.
    """
    if request.method == 'GET':
        apps = get_apps()
        for x in apps:
            app_dir = osp.dirname(x.__file__)
            module = x.__name__
            if app == module.split('.')[-2]: # we get the models module here
                if app_dir.endswith("models"):
                    # this can happen only in case when models are a directory
                    app_dir = osp.split(app_dir)[0]
                media_dir = osp.join(app_dir, "media", app)
                if not osp.isdir(media_dir):
                    media_dir = osp.join(app_dir, "media")
                asset = osp.join(media_dir, path)
                if osp.exists(asset):
                    return django_serve(request, path, document_root=media_dir, show_indexes=show_indexes)
                #continue
        # Fallback: no app-bundled asset matched — serve from MEDIA_ROOT.
        return django_serve(request, app+"/"+path, document_root=settings.MEDIA_ROOT, show_indexes=show_indexes)
    elif request.method == 'POST':
        data = request.POST.get("data", "")
        apps = get_apps()
        for x in apps:
            app_dir = osp.dirname(x.__file__)
            module = x.__name__
            if app == module.split('.')[-2]: # we get the models module here
                media_dir = osp.join(app_dir, "media")
                asset = osp.join(media_dir, path)
                if osp.exists(asset):
                    f = file(asset, 'w')
                    # Rewrite the CSS file, reformatting each "selector { rules }" block.
                    for line in data.split('\n'):
                        # NOTE(review): str.strip() returns a new string; the
                        # result is discarded here, so this line is a no-op.
                        line.strip()
                        line = line[:-1]
                        if line:
                            selector, datap = line.split('{')
                            print >>f, selector, '{'
                            # NOTE(review): discarded strip() result again — no-op.
                            datap.strip()
                            lines = datap.split(';')
                            if lines:
                                print >>f, " "+";\n ".join(lines)
                            print >>f, '}\n'
                    f.close()
                    return django_serve(request, path, document_root=media_dir, show_indexes=show_indexes)
                continue
def get_file(path):
    """Resolve a '/media/<app>/<path>' URL to a filesystem path.

    Checks each installed app's bundled 'media' directory first, then falls
    back to MEDIA_ROOT. Python 2 code (print statement below).
    """
    app = path.split('/')[2]
    path = "/".join(path.split('/')[3:])
    apps = get_apps()
    for x in apps:
        app_dir = osp.dirname(x.__file__)
        module = x.__name__
        if app == module.split('.')[-2]: # we get the models module here
            media_dir = osp.join(app_dir, "media")
            asset = osp.join(media_dir, path)
            if osp.exists(asset):
                # NOTE(review): leftover debug print.
                print osp.join(media_dir, path)
                return osp.join(media_dir, path)
    return osp.join(settings.MEDIA_ROOT, app+"/"+path)
@cache_page(24*60*60)
def serve_cached_asset(request, asset):
    """Serve a concatenated JS/CSS bundle whose file list is stored in the cache.

    NOTE(review): assumes cache.get(name) returns a list; on a cache miss
    (None) the join below raises TypeError instead of a clean 404 — confirm
    that is intended. Also assumes *asset* contains exactly one dot.
    """
    name, ext = asset.split('.')
    files = cache.get(name)
    if ext == 'js':
        response = HttpResponse("\n".join([file(get_file(path)).read() for path in files]), mimetype="text/javascript")
        return response
    elif ext == 'css':
        response = HttpResponse("\n".join([file(get_file(path)).read() for path in files]), mimetype="text/css")
        return response
    raise Http404()
|
en
| 0.644785
|
#we get the models module here # this can happen only in case when models are an directory #continue #we get the models module here #we get the models module here
| 2.231952
| 2
|
scantools/.history/scanbak/scanbackup_20210224150857.py
|
Octoberr/swm0920
| 2
|
6625927
|
"""
1、文件到这里
一份给ES 一份给自己
新增ES旧索引入库
在继承原有功能的基础上
重构备份程序,按照数据内的
国家-当前时间(年-月-日)
如果按照数据内的时间的话也会面临和按国家端口备份的问题
不用再分端口了
create by judy 20201217
"""
from pathlib import Path
import threading
import json
from queue import Queue
import traceback
import datetime
import time
from shutil import copyfile
import zipfile
import shutil
class ScanBackUP(object):
    def __init__(self) -> None:
        """Set up folder attributes, load config_path.json and create locks/queues."""
        # super().__init__()
        # All incoming data lands here first.
        self._input = None
        # A copy of every file goes here for ES; this folder is not managed further by this program.
        self._esinput = None
        # Working folder for data queued for backup; everything to process ends up here.
        self._dbu_input = None
        self._databack = None
        self._zipdata: Path = None
        self._zip_size = None
        # Backup threads default to 1; change in the config file and restart.
        self.backup_thread = 1
        self.zip_thread = 1
        # Toggle for copying incoming data to the ES input folder(s).
        self.copy_esinput_enable = True
        self._tmp = Path('./tmp')
        self._tmp.mkdir(exist_ok=True)
        # Folder for the additional copy that feeds the old ES index.
        self._old_esinput = None
        self.config_path = Path(r'./config_path.json')
        try:
            self._init_cpinfo()
        except:
            raise Exception(
                f"初始化配置参数失败,请检查配置文件\nerror:{traceback.format_exc()}")
        # Runtime state below.
        # File lock: only one thread may touch files at a time.
        self.__file_locker = threading.Lock()
        self.__scan_file_locker = threading.Lock()
        self._zipfile_locker = threading.Lock()
        # Zipping can take a while, so track folders currently being zipped.
        self._zip_dealing = {}
        # Work queues keyed by file suffix; currently only iscan is handled.
        self.iscan_task_queue = Queue()
        self._zip_queue = Queue()
        self.iscan_suffix = '.iscan_search'
        # try:
        #     self._restore_existdata()
        # except:
        #     raise Exception(
        #         "There's something wrong with restoring the environment")
    def _init_cpinfo(self):
        """
        Load paths and parameters from the JSON config file and create the
        corresponding folders. Raises Exception when a setting is missing or
        has the wrong type.
        :return:
        """
        conf_str = self.config_path.read_text(encoding='utf-8')
        conf_dict = json.loads(conf_str)
        _input = conf_dict.get('data_input')
        if not isinstance(_input, str):
            raise Exception("Unknown data_input path")
        self._input = Path(_input)
        self._input.mkdir(exist_ok=True)
        print(
            f"Start scan data file, input_file_path:{self._input.as_posix()}")
        _esinput = conf_dict.get('es_input')
        if not isinstance(_esinput, str):
            raise Exception("Unknown es_input path")
        self._esinput = Path(_esinput)
        self._esinput.mkdir(exist_ok=True)
        print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
        _dbuinput = conf_dict.get('backup_input')
        if not isinstance(_dbuinput, str):
            # NOTE(review): "Unkown" typo kept — it is a runtime message.
            raise Exception("Unkown backup_input path")
        self._dbu_input = Path(_dbuinput)
        self._dbu_input.mkdir(exist_ok=True)
        print(f"Data backup process path:{self._dbu_input.as_posix()}")
        _databack = conf_dict.get('databackup')
        if not isinstance(_databack, str):
            raise Exception("Unknown databackup path")
        self._databack = Path(_databack)
        self._databack.mkdir(exist_ok=True)
        print(f"Data save backup path:{self._databack.as_posix()}")
        _zipdata = conf_dict.get('zipdata')
        if not isinstance(_zipdata, str):
            raise Exception("Unkown zipdata path")
        self._zipdata = Path(_zipdata)
        self._zipdata.mkdir(exist_ok=True)
        print(f"Zipdata save path:{self._zipdata.as_posix()}")
        _zip_size = conf_dict.get('zip_size')
        if not isinstance(_zip_size, int):
            raise Exception("Unknown zip_size type")
        # Convert the configured size from MB to bytes.
        self._zip_size = _zip_size * 1024 * 1024
        print(f"Zip data size:{_zip_size}MB")
        backupthread = conf_dict.get('backup_thread')
        if not isinstance(backupthread, int):
            raise Exception("Unknown backupthread type")
        self.backup_thread = backupthread
        zipthread = conf_dict.get('zipdata_thread')
        if not isinstance(zipthread, int):
            raise Exception("Unknown zipthread type")
        self.zip_thread = zipthread
        time_limit = conf_dict.get('time_limit')
        if not isinstance(time_limit, int):
            raise Exception("Unknown time_limit type")
        self._backup_interval_time = time_limit * 24 * 60 * 60
        print(f"Zip data time expired after {time_limit} days")
        # Copying to the ES input folder is enabled by default.
        copy_esinput_enable = conf_dict.get('copy_to_esinput', True)
        self.copy_esinput_enable = copy_esinput_enable
        # Target folder for the old-index copy.
        _old_esinput = conf_dict.get('old_esinput')
        if not isinstance(_old_esinput, str):
            raise Exception("Unknown old_esinput path")
        self._old_esinput = Path(_old_esinput)
        self._old_esinput.mkdir(exist_ok=True)
        print(
            f"Save data to old ES, old_espath:{self._old_esinput.as_posix()}")
def scan_file(self):
"""
扫描输入的文件
根据文件后缀进行分类,将文件放入待处理队列
:return:
"""
while True:
try:
for file in self._input.iterdir():
name = file.name
# 全部移动到tmp目录下去
tmpname = self._tmp / name
# file.replace(tmpname)
with self.__scan_file_locker:
# 这个文件得尽快移动到tmp文件夹,不然下次扫描又会扫描到它就会出问题
shutil.move(file.as_posix(), tmpname.as_posix())
try:
if tmpname.suffix == self.iscan_suffix:
# 只进行复制操作
# source: Path = self._input / name
target: Path = self._dbu_input / name
copyfile(tmpname.as_posix(), target.as_posix())
self.iscan_task_queue.put(target)
print(
f"Backup iscan_search data, filename:{file.as_posix()}")
except:
print(
f'Scan list file error, err:{traceback.format_exc()}')
finally:
# 最后无论如何都需要将文件输出到esinput
if self.copy_esinput_enable:
# 拷贝到新索引
outname = self._esinput / name
copyfile(tmpname.as_posix(), outname.as_posix())
# 拷贝带旧索引
old_outname = self._old_esinput / name
# 一般来说是不会有文件存在的,但是意外不可避免嘛, 所以这里做一个判定,如果还存在文件就删了
if tmpname.exists():
tmpname.unlink()
except:
print(f'Scan task file error, err:{traceback.format_exc()}')
continue
finally:
print("There is no scan data to back up")
time.sleep(0.5)
def _process_file(self, tmpfile: Path):
"""
读取文件里面的数据打开一下,获取到信息后再关上
"""
with tmpfile.open('r', encoding='utf-8') as fp:
j_text = fp.read()
d_text = json.loads(j_text)
# scan_time = d_text.get('time')
# if scan_time is None:
# scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
country = d_text.get('geoinfo').get('country').get('code')
except:
country = 'UNKNOWN'
return country
def back_file(self):
"""
开始备份数据,先保存到文件夹
当这个文件夹到达一定大小然后压缩保存
:return:
"""
got = False
while True:
got = False
if self.iscan_task_queue.empty():
time.sleep(0.5)
continue
try:
bfile: Path = self.iscan_task_queue.get()
got = True
name = bfile.name
# 现在直接读文件里面的国家和日期
country = self._process_file(bfile)
# 每次保存之前去判断下是否需要修改文件名字并进行压缩备份
date_now_str = datetime.datetime.now().strftime("%Y-%m-%d")
# 新建文件夹的时候需要锁一下,其他时候直接移动即可
with self.__file_locker:
# 先把文件移动过去
dirname: Path = self._databack / country / date_now_str
dirname.mkdir(exist_ok=True, parents=True)
# 移过去的文件名
filename = dirname / name
# 移动到目标文件夹
bfile.replace(filename)
print(
f"Backup file, country:{country}, filename:{name}, date:{date_now_str}")
except:
print(f'Backup file error:\n{traceback.format_exc()}')
finally:
if got:
self.iscan_task_queue.task_done()
def scan_zip_file(self):
"""
压缩文件的线程,每天去扫描一次
将昨天的文件夹压缩到压缩文件夹下
"""
while True:
try:
date_now = datetime.datetime.now().date()
for country in self._databack.iterdir():
if not country.exists():
continue
country_name = country.name
for d_file in country.iterdir():
if self._zip_dealing.__contains__(d_file):
continue
d_name = d_file.name
d_date = datetime.datetime.strptime(
d_name, "%Y-%m-%d").date()
# 如果是今天以前的数据那么就进行压缩
if date_now > d_date:
self._zip_queue.put((d_file, country_name))
with self._zipfile_locker:
# 加入正在处理队列
self._zip_dealing[d_file] = 1
print(
f"A file wait to zip, filename:{d_file.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
print("There is no scan data to zip")
time.sleep(3600)
def process_zip_file(self):
"""
压缩今天以前的文件夹
"""
got = False
zipfile_path = None
while True:
got = False
if self._zip_queue.empty():
time.sleep(1)
continue
try:
zipfile_path, country = self._zip_queue.get()
got = True
zip_store_file = self._zipdata / country
zip_store_file.mkdir(exist_ok=True)
zipname = zip_store_file/f"{zipfile_path.name}.zip"
print(
f"Start zipfile, filename:{zipname.as_posix()}")
# 增加一个写入限制
with zipfile.ZipFile(zipname.as_posix(), 'a', zipfile.ZIP_DEFLATED) as write:
for file in zipfile_path.iterdir():
write.write(file.as_posix())
# 写入后删除
file.unlink()
write.close()
# 最后删除已经压缩好的文件夹
zipfile_path.rmdir()
print(
f"Store zipfile success, filename:{zipname.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
if got:
self._zip_queue.task_done()
with self._zipfile_locker:
self._zip_dealing.pop(zipfile_path, None)
def start(self):
"""
项目启动
:return:
"""
thread1 = threading.Thread(target=self.scan_file, name="scanfile")
thread1.start()
for i in range(self.backup_thread):
t = threading.Thread(target=self.back_file, name=f"backfile{i}")
t.start()
thread2 = threading.Thread(
target=self.scan_zip_file, name=f"scan_zipfile")
thread2.start()
for j in range(self.zip_thread):
tz = threading.Thread(
target=self.process_zip_file, name=f"zipfile{j}")
tz.start()
if __name__ == "__main__":
scup = ScanBackUP()
scup.start()
|
"""
1、文件到这里
一份给ES 一份给自己
新增ES旧索引入库
在继承原有功能的基础上
重构备份程序,按照数据内的
国家-当前时间(年-月-日)
如果按照数据内的时间的话也会面临和按国家端口备份的问题
不用再分端口了
create by judy 20201217
"""
from pathlib import Path
import threading
import json
from queue import Queue
import traceback
import datetime
import time
from shutil import copyfile
import zipfile
import shutil
class ScanBackUP(object):
def __init__(self) -> None:
# super().__init__()
# 所有数据先到这
self._input = None
# 所有数据先复制一份到这, 这个是程序不用管的文件夹
self._esinput = None
# 将要备份的数据放到这, 要处理的数据全部放在这里
self._dbu_input = None
self._databack = None
self._zipdata: Path = None
self._zip_size = None
# 备份线程默认为一个,可以在配置里面更改重启
self.backup_thread = 1
self.zip_thread = 1
# 增加一个是否拷贝到ES的功能
self.copy_esinput_enable = True
self._tmp = Path('./tmp')
self._tmp.mkdir(exist_ok=True)
# 文件是否需要拷贝一份到旧索引
self._old_esinput = None
self.config_path = Path(r'./config_path.json')
try:
self._init_cpinfo()
except:
raise Exception(
f"初始化配置参数失败,请检查配置文件\nerror:{traceback.format_exc()}")
# 需要用到的参数
# 文件锁,同一时间只允许一个线程操作文件
self.__file_locker = threading.Lock()
self.__scan_file_locker = threading.Lock()
self._zipfile_locker = threading.Lock()
# 因为压缩可能处理的时间比较长,所以需要增加一个正在压缩的字典
self._zip_dealing = {}
# 根据后缀分配的需要处理的队列,目前只有iscan
self.iscan_task_queue = Queue()
self._zip_queue = Queue()
self.iscan_suffix = '.iscan_search'
# try:
# self._restore_existdata()
# except:
# raise Exception(
# "There's something wrong with restoring the environment")
def _init_cpinfo(self):
"""
初始化配置文件中的路径和参数
:return:
"""
conf_str = self.config_path.read_text(encoding='utf-8')
conf_dict = json.loads(conf_str)
_input = conf_dict.get('data_input')
if not isinstance(_input, str):
raise Exception("Unknown data_input path")
self._input = Path(_input)
self._input.mkdir(exist_ok=True)
print(
f"Start scan data file, input_file_path:{self._input.as_posix()}")
_esinput = conf_dict.get('es_input')
if not isinstance(_esinput, str):
raise Exception("Unknown es_input path")
self._esinput = Path(_esinput)
self._esinput.mkdir(exist_ok=True)
print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
_dbuinput = conf_dict.get('backup_input')
if not isinstance(_dbuinput, str):
raise Exception("Unkown backup_input path")
self._dbu_input = Path(_dbuinput)
self._dbu_input.mkdir(exist_ok=True)
print(f"Data backup process path:{self._dbu_input.as_posix()}")
_databack = conf_dict.get('databackup')
if not isinstance(_databack, str):
raise Exception("Unknown databackup path")
self._databack = Path(_databack)
self._databack.mkdir(exist_ok=True)
print(f"Data save backup path:{self._databack.as_posix()}")
_zipdata = conf_dict.get('zipdata')
if not isinstance(_zipdata, str):
raise Exception("Unkown zipdata path")
self._zipdata = Path(_zipdata)
self._zipdata.mkdir(exist_ok=True)
print(f"Zipdata save path:{self._zipdata.as_posix()}")
_zip_size = conf_dict.get('zip_size')
if not isinstance(_zip_size, int):
raise Exception("Unknown zip_size type")
# 将单位换算成B
self._zip_size = _zip_size * 1024 * 1024
print(f"Zip data size:{_zip_size}MB")
backupthread = conf_dict.get('backup_thread')
if not isinstance(backupthread, int):
raise Exception("Unknown backupthread type")
self.backup_thread = backupthread
zipthread = conf_dict.get('zipdata_thread')
if not isinstance(zipthread, int):
raise Exception("Unknown zipthread type")
self.zip_thread = zipthread
time_limit = conf_dict.get('time_limit')
if not isinstance(time_limit, int):
raise Exception("Unknown time_limit type")
self._backup_interval_time = time_limit * 24 * 60 * 60
print(f"Zip data time expired after {time_limit} days")
# 默认拷贝到ES的功能为开放
copy_esinput_enable = conf_dict.get('copy_to_esinput', True)
self.copy_esinput_enable = copy_esinput_enable
# 拷贝旧索引数据
_old_esinput = conf_dict.get('old_esinput')
if not isinstance(_old_esinput, str):
raise Exception("Unknown old_esinput path")
self._old_esinput = Path(_old_esinput)
self._old_esinput.mkdir(exist_ok=True)
print(
f"Save data to old ES, old_espath:{self._old_esinput.as_posix()}")
def scan_file(self):
"""
扫描输入的文件
根据文件后缀进行分类,将文件放入待处理队列
:return:
"""
while True:
try:
for file in self._input.iterdir():
name = file.name
# 全部移动到tmp目录下去
tmpname = self._tmp / name
# file.replace(tmpname)
with self.__scan_file_locker:
# 这个文件得尽快移动到tmp文件夹,不然下次扫描又会扫描到它就会出问题
shutil.move(file.as_posix(), tmpname.as_posix())
try:
if tmpname.suffix == self.iscan_suffix:
# 只进行复制操作
# source: Path = self._input / name
target: Path = self._dbu_input / name
copyfile(tmpname.as_posix(), target.as_posix())
self.iscan_task_queue.put(target)
print(
f"Backup iscan_search data, filename:{file.as_posix()}")
except:
print(
f'Scan list file error, err:{traceback.format_exc()}')
finally:
# 最后无论如何都需要将文件输出到esinput
if self.copy_esinput_enable:
# 拷贝到新索引
outname = self._esinput / name
copyfile(tmpname.as_posix(), outname.as_posix())
# 拷贝带旧索引
old_outname = self._old_esinput / name
# 一般来说是不会有文件存在的,但是意外不可避免嘛, 所以这里做一个判定,如果还存在文件就删了
if tmpname.exists():
tmpname.unlink()
except:
print(f'Scan task file error, err:{traceback.format_exc()}')
continue
finally:
print("There is no scan data to back up")
time.sleep(0.5)
def _process_file(self, tmpfile: Path):
"""
读取文件里面的数据打开一下,获取到信息后再关上
"""
with tmpfile.open('r', encoding='utf-8') as fp:
j_text = fp.read()
d_text = json.loads(j_text)
# scan_time = d_text.get('time')
# if scan_time is None:
# scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
country = d_text.get('geoinfo').get('country').get('code')
except:
country = 'UNKNOWN'
return country
def back_file(self):
"""
开始备份数据,先保存到文件夹
当这个文件夹到达一定大小然后压缩保存
:return:
"""
got = False
while True:
got = False
if self.iscan_task_queue.empty():
time.sleep(0.5)
continue
try:
bfile: Path = self.iscan_task_queue.get()
got = True
name = bfile.name
# 现在直接读文件里面的国家和日期
country = self._process_file(bfile)
# 每次保存之前去判断下是否需要修改文件名字并进行压缩备份
date_now_str = datetime.datetime.now().strftime("%Y-%m-%d")
# 新建文件夹的时候需要锁一下,其他时候直接移动即可
with self.__file_locker:
# 先把文件移动过去
dirname: Path = self._databack / country / date_now_str
dirname.mkdir(exist_ok=True, parents=True)
# 移过去的文件名
filename = dirname / name
# 移动到目标文件夹
bfile.replace(filename)
print(
f"Backup file, country:{country}, filename:{name}, date:{date_now_str}")
except:
print(f'Backup file error:\n{traceback.format_exc()}')
finally:
if got:
self.iscan_task_queue.task_done()
def scan_zip_file(self):
"""
压缩文件的线程,每天去扫描一次
将昨天的文件夹压缩到压缩文件夹下
"""
while True:
try:
date_now = datetime.datetime.now().date()
for country in self._databack.iterdir():
if not country.exists():
continue
country_name = country.name
for d_file in country.iterdir():
if self._zip_dealing.__contains__(d_file):
continue
d_name = d_file.name
d_date = datetime.datetime.strptime(
d_name, "%Y-%m-%d").date()
# 如果是今天以前的数据那么就进行压缩
if date_now > d_date:
self._zip_queue.put((d_file, country_name))
with self._zipfile_locker:
# 加入正在处理队列
self._zip_dealing[d_file] = 1
print(
f"A file wait to zip, filename:{d_file.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
print("There is no scan data to zip")
time.sleep(3600)
def process_zip_file(self):
"""
压缩今天以前的文件夹
"""
got = False
zipfile_path = None
while True:
got = False
if self._zip_queue.empty():
time.sleep(1)
continue
try:
zipfile_path, country = self._zip_queue.get()
got = True
zip_store_file = self._zipdata / country
zip_store_file.mkdir(exist_ok=True)
zipname = zip_store_file/f"{zipfile_path.name}.zip"
print(
f"Start zipfile, filename:{zipname.as_posix()}")
# 增加一个写入限制
with zipfile.ZipFile(zipname.as_posix(), 'a', zipfile.ZIP_DEFLATED) as write:
for file in zipfile_path.iterdir():
write.write(file.as_posix())
# 写入后删除
file.unlink()
write.close()
# 最后删除已经压缩好的文件夹
zipfile_path.rmdir()
print(
f"Store zipfile success, filename:{zipname.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
if got:
self._zip_queue.task_done()
with self._zipfile_locker:
self._zip_dealing.pop(zipfile_path, None)
def start(self):
"""
项目启动
:return:
"""
thread1 = threading.Thread(target=self.scan_file, name="scanfile")
thread1.start()
for i in range(self.backup_thread):
t = threading.Thread(target=self.back_file, name=f"backfile{i}")
t.start()
thread2 = threading.Thread(
target=self.scan_zip_file, name=f"scan_zipfile")
thread2.start()
for j in range(self.zip_thread):
tz = threading.Thread(
target=self.process_zip_file, name=f"zipfile{j}")
tz.start()
if __name__ == "__main__":
scup = ScanBackUP()
scup.start()
|
zh
| 0.970092
|
1、文件到这里 一份给ES 一份给自己 新增ES旧索引入库 在继承原有功能的基础上 重构备份程序,按照数据内的 国家-当前时间(年-月-日) 如果按照数据内的时间的话也会面临和按国家端口备份的问题 不用再分端口了 create by judy 20201217 # super().__init__() # 所有数据先到这 # 所有数据先复制一份到这, 这个是程序不用管的文件夹 # 将要备份的数据放到这, 要处理的数据全部放在这里 # 备份线程默认为一个,可以在配置里面更改重启 # 增加一个是否拷贝到ES的功能 # 文件是否需要拷贝一份到旧索引 # 需要用到的参数 # 文件锁,同一时间只允许一个线程操作文件 # 因为压缩可能处理的时间比较长,所以需要增加一个正在压缩的字典 # 根据后缀分配的需要处理的队列,目前只有iscan # try: # self._restore_existdata() # except: # raise Exception( # "There's something wrong with restoring the environment") 初始化配置文件中的路径和参数 :return: # 将单位换算成B # 默认拷贝到ES的功能为开放 # 拷贝旧索引数据 扫描输入的文件 根据文件后缀进行分类,将文件放入待处理队列 :return: # 全部移动到tmp目录下去 # file.replace(tmpname) # 这个文件得尽快移动到tmp文件夹,不然下次扫描又会扫描到它就会出问题 # 只进行复制操作 # source: Path = self._input / name # 最后无论如何都需要将文件输出到esinput # 拷贝到新索引 # 拷贝带旧索引 # 一般来说是不会有文件存在的,但是意外不可避免嘛, 所以这里做一个判定,如果还存在文件就删了 读取文件里面的数据打开一下,获取到信息后再关上 # scan_time = d_text.get('time') # if scan_time is None: # scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 开始备份数据,先保存到文件夹 当这个文件夹到达一定大小然后压缩保存 :return: # 现在直接读文件里面的国家和日期 # 每次保存之前去判断下是否需要修改文件名字并进行压缩备份 # 新建文件夹的时候需要锁一下,其他时候直接移动即可 # 先把文件移动过去 # 移过去的文件名 # 移动到目标文件夹 压缩文件的线程,每天去扫描一次 将昨天的文件夹压缩到压缩文件夹下 # 如果是今天以前的数据那么就进行压缩 # 加入正在处理队列 压缩今天以前的文件夹 # 增加一个写入限制 # 写入后删除 # 最后删除已经压缩好的文件夹 项目启动 :return:
| 2.328407
| 2
|
venv/lib/python3.5/site-packages/igraph/test/bipartite.py
|
dtklinh/Protein-Rigid-Domains-Estimation
| 2
|
6625928
|
<filename>venv/lib/python3.5/site-packages/igraph/test/bipartite.py
import unittest
from igraph import *
class BipartiteTests(unittest.TestCase):
def testCreateBipartite(self):
g = Graph.Bipartite([0, 1]*5, [(0,1),(2,3),(4,5),(6,7),(8,9)])
self.assertTrue(g.vcount() == 10 and g.ecount() == 5 and g.is_directed() == False)
self.assertTrue(g.is_bipartite())
self.assertTrue(g.vs["type"] == [False, True]*5)
def testFullBipartite(self):
g = Graph.Full_Bipartite(10, 5)
self.assertTrue(g.vcount() == 15 and g.ecount() == 50 and g.is_directed() == False)
expected = sorted([(i, j) for i in range(10) for j in range(10, 15)])
self.assertTrue(sorted(g.get_edgelist()) == expected)
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
g = Graph.Full_Bipartite(10, 5, directed=True, mode=OUT)
self.assertTrue(g.vcount() == 15 and g.ecount() == 50 and g.is_directed() == True)
self.assertTrue(sorted(g.get_edgelist()) == expected)
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
g = Graph.Full_Bipartite(10, 5, directed=True, mode=IN)
self.assertTrue(g.vcount() == 15 and g.ecount() == 50 and g.is_directed() == True)
self.assertTrue(sorted(g.get_edgelist()) == sorted([(i,j) for j, i in expected]))
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
g = Graph.Full_Bipartite(10, 5, directed=True)
self.assertTrue(g.vcount() == 15 and g.ecount() == 100 and g.is_directed() == True)
expected.extend([(j, i) for i, j in expected])
expected.sort()
self.assertTrue(sorted(g.get_edgelist()) == expected)
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
def testIncidence(self):
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]])
self.assertTrue(g.vcount() == 5 and g.ecount() == 4 and g.is_directed() == False)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(0,3),(0,4),(1,2),(1,3)])
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]], multiple=True)
self.assertTrue(g.vcount() == 5 and g.ecount() == 5 and g.is_directed() == False)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(0,3),(0,4),(1,2),(1,3),(1,3)])
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]], directed=True)
self.assertTrue(g.vcount() == 5 and g.ecount() == 4 and g.is_directed() == True)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(0,3),(0,4),(1,2),(1,3)])
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]], directed=True, mode="in")
self.assertTrue(g.vcount() == 5 and g.ecount() == 4 and g.is_directed() == True)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(2,1),(3,0),(3,1),(4,0)])
def testGetIncidence(self):
mat = [[0, 1, 1], [1, 1, 0]]
v1, v2 = [0, 1], [2, 3, 4]
g = Graph.Incidence(mat)
self.assertTrue(g.get_incidence() == (mat, v1, v2))
g.vs["type2"] = g.vs["type"]
self.assertTrue(g.get_incidence("type2") == (mat, v1, v2))
self.assertTrue(g.get_incidence(g.vs["type2"]) == (mat, v1, v2))
def testBipartiteProjection(self):
g = Graph.Full_Bipartite(10, 5)
g1, g2 = g.bipartite_projection()
self.assertTrue(g1.isomorphic(Graph.Full(10)))
self.assertTrue(g2.isomorphic(Graph.Full(5)))
self.assertTrue(g.bipartite_projection(which=0).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=1).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=False).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=True).isomorphic(g2))
self.assertTrue(g1.es["weight"] == [5] * 45)
self.assertTrue(g2.es["weight"] == [10] * 10)
self.assertTrue(g.bipartite_projection_size() == (10, 45, 5, 10))
g1, g2 = g.bipartite_projection(probe1=10)
self.assertTrue(g1.isomorphic(Graph.Full(5)))
self.assertTrue(g2.isomorphic(Graph.Full(10)))
self.assertTrue(g.bipartite_projection(which=0).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=1).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=False).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=True).isomorphic(g1))
g1, g2 = g.bipartite_projection(multiplicity=False)
self.assertTrue(g1.isomorphic(Graph.Full(10)))
self.assertTrue(g2.isomorphic(Graph.Full(5)))
self.assertTrue(g.bipartite_projection(which=0).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=1).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=False).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=True).isomorphic(g2))
self.assertTrue("weight" not in g1.edge_attributes())
self.assertTrue("weight" not in g2.edge_attributes())
def testIsBipartite(self):
g = Graph.Star(10)
self.assertTrue(g.is_bipartite() == True)
self.assertTrue(g.is_bipartite(True) == (True, [False] + [True]*9))
g = Graph.Tree(100, 3)
self.assertTrue(g.is_bipartite() == True)
g = Graph.Ring(9)
self.assertTrue(g.is_bipartite() == False)
self.assertTrue(g.is_bipartite(True) == (False, None))
g = Graph.Ring(10)
self.assertTrue(g.is_bipartite() == True)
g += (2, 0)
self.assertTrue(g.is_bipartite(True) == (False, None))
def suite():
bipartite_suite = unittest.makeSuite(BipartiteTests)
return unittest.TestSuite([bipartite_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
<filename>venv/lib/python3.5/site-packages/igraph/test/bipartite.py
import unittest
from igraph import *
class BipartiteTests(unittest.TestCase):
def testCreateBipartite(self):
g = Graph.Bipartite([0, 1]*5, [(0,1),(2,3),(4,5),(6,7),(8,9)])
self.assertTrue(g.vcount() == 10 and g.ecount() == 5 and g.is_directed() == False)
self.assertTrue(g.is_bipartite())
self.assertTrue(g.vs["type"] == [False, True]*5)
def testFullBipartite(self):
g = Graph.Full_Bipartite(10, 5)
self.assertTrue(g.vcount() == 15 and g.ecount() == 50 and g.is_directed() == False)
expected = sorted([(i, j) for i in range(10) for j in range(10, 15)])
self.assertTrue(sorted(g.get_edgelist()) == expected)
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
g = Graph.Full_Bipartite(10, 5, directed=True, mode=OUT)
self.assertTrue(g.vcount() == 15 and g.ecount() == 50 and g.is_directed() == True)
self.assertTrue(sorted(g.get_edgelist()) == expected)
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
g = Graph.Full_Bipartite(10, 5, directed=True, mode=IN)
self.assertTrue(g.vcount() == 15 and g.ecount() == 50 and g.is_directed() == True)
self.assertTrue(sorted(g.get_edgelist()) == sorted([(i,j) for j, i in expected]))
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
g = Graph.Full_Bipartite(10, 5, directed=True)
self.assertTrue(g.vcount() == 15 and g.ecount() == 100 and g.is_directed() == True)
expected.extend([(j, i) for i, j in expected])
expected.sort()
self.assertTrue(sorted(g.get_edgelist()) == expected)
self.assertTrue(g.vs["type"] == [False]*10 + [True]*5)
def testIncidence(self):
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]])
self.assertTrue(g.vcount() == 5 and g.ecount() == 4 and g.is_directed() == False)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(0,3),(0,4),(1,2),(1,3)])
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]], multiple=True)
self.assertTrue(g.vcount() == 5 and g.ecount() == 5 and g.is_directed() == False)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(0,3),(0,4),(1,2),(1,3),(1,3)])
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]], directed=True)
self.assertTrue(g.vcount() == 5 and g.ecount() == 4 and g.is_directed() == True)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(0,3),(0,4),(1,2),(1,3)])
g = Graph.Incidence([[0, 1, 1], [1, 2, 0]], directed=True, mode="in")
self.assertTrue(g.vcount() == 5 and g.ecount() == 4 and g.is_directed() == True)
self.assertTrue(g.vs["type"] == [False]*2 + [True]*3)
self.assertTrue(sorted(g.get_edgelist()) == [(2,1),(3,0),(3,1),(4,0)])
def testGetIncidence(self):
mat = [[0, 1, 1], [1, 1, 0]]
v1, v2 = [0, 1], [2, 3, 4]
g = Graph.Incidence(mat)
self.assertTrue(g.get_incidence() == (mat, v1, v2))
g.vs["type2"] = g.vs["type"]
self.assertTrue(g.get_incidence("type2") == (mat, v1, v2))
self.assertTrue(g.get_incidence(g.vs["type2"]) == (mat, v1, v2))
def testBipartiteProjection(self):
g = Graph.Full_Bipartite(10, 5)
g1, g2 = g.bipartite_projection()
self.assertTrue(g1.isomorphic(Graph.Full(10)))
self.assertTrue(g2.isomorphic(Graph.Full(5)))
self.assertTrue(g.bipartite_projection(which=0).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=1).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=False).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=True).isomorphic(g2))
self.assertTrue(g1.es["weight"] == [5] * 45)
self.assertTrue(g2.es["weight"] == [10] * 10)
self.assertTrue(g.bipartite_projection_size() == (10, 45, 5, 10))
g1, g2 = g.bipartite_projection(probe1=10)
self.assertTrue(g1.isomorphic(Graph.Full(5)))
self.assertTrue(g2.isomorphic(Graph.Full(10)))
self.assertTrue(g.bipartite_projection(which=0).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=1).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=False).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=True).isomorphic(g1))
g1, g2 = g.bipartite_projection(multiplicity=False)
self.assertTrue(g1.isomorphic(Graph.Full(10)))
self.assertTrue(g2.isomorphic(Graph.Full(5)))
self.assertTrue(g.bipartite_projection(which=0).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=1).isomorphic(g2))
self.assertTrue(g.bipartite_projection(which=False).isomorphic(g1))
self.assertTrue(g.bipartite_projection(which=True).isomorphic(g2))
self.assertTrue("weight" not in g1.edge_attributes())
self.assertTrue("weight" not in g2.edge_attributes())
def testIsBipartite(self):
g = Graph.Star(10)
self.assertTrue(g.is_bipartite() == True)
self.assertTrue(g.is_bipartite(True) == (True, [False] + [True]*9))
g = Graph.Tree(100, 3)
self.assertTrue(g.is_bipartite() == True)
g = Graph.Ring(9)
self.assertTrue(g.is_bipartite() == False)
self.assertTrue(g.is_bipartite(True) == (False, None))
g = Graph.Ring(10)
self.assertTrue(g.is_bipartite() == True)
g += (2, 0)
self.assertTrue(g.is_bipartite(True) == (False, None))
def suite():
bipartite_suite = unittest.makeSuite(BipartiteTests)
return unittest.TestSuite([bipartite_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
none
| 1
| 3.216071
| 3
|
|
frappe/core/doctype/role/role.py
|
jimmyrianto/frappe
| 5
|
6625929
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Role(Document):
def before_rename(self, old, new, merge=False):
if old in ("Guest", "Administrator", "System Manager", "All"):
frappe.throw(frappe._("Standard roles cannot be renamed"))
def after_insert(self):
frappe.cache().hdel('roles', 'Administrator')
def validate(self):
if self.disabled:
if self.name in ("Guest", "Administrator", "System Manager", "All"):
frappe.throw(frappe._("Standard roles cannot be disabled"))
else:
frappe.db.sql("delete from `tabHas Role` where role = %s", self.name)
frappe.clear_cache()
# Get email addresses of all users that have been assigned this role
def get_emails_from_role(role):
emails = []
users = frappe.get_list("Has Role", filters={"role": role, "parenttype": "User"},
fields=["parent"])
for user in users:
user_email, enabled = frappe.db.get_value("User", user.parent, ["email", "enabled"])
if enabled and user_email not in ["<EMAIL>", "<EMAIL>"]:
emails.append(user_email)
return emails
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Role(Document):
def before_rename(self, old, new, merge=False):
if old in ("Guest", "Administrator", "System Manager", "All"):
frappe.throw(frappe._("Standard roles cannot be renamed"))
def after_insert(self):
frappe.cache().hdel('roles', 'Administrator')
def validate(self):
if self.disabled:
if self.name in ("Guest", "Administrator", "System Manager", "All"):
frappe.throw(frappe._("Standard roles cannot be disabled"))
else:
frappe.db.sql("delete from `tabHas Role` where role = %s", self.name)
frappe.clear_cache()
# Get email addresses of all users that have been assigned this role
def get_emails_from_role(role):
emails = []
users = frappe.get_list("Has Role", filters={"role": role, "parenttype": "User"},
fields=["parent"])
for user in users:
user_email, enabled = frappe.db.get_value("User", user.parent, ["email", "enabled"])
if enabled and user_email not in ["<EMAIL>", "<EMAIL>"]:
emails.append(user_email)
return emails
|
en
| 0.852487
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # Get email addresses of all users that have been assigned this role
| 2.1158
| 2
|
src/Main.py
|
nekonyanneko/GA
| 0
|
6625930
|
# -*- coding: utf-8 -*-
import random
from scoop import futures
from deap import base
from deap import creator
from deap import tools
from deap import cma
import Enum as enu
import Employee as emp
import Shift as shi
import EvalShift as eva
"""
deap setting
"""
# 評価関数のFit率の重要度(小さい値の方が重要視される)
creator.create("FitnessPeopleCount", base.Fitness, weights=(-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0))
"""
creator.create("FitnessPeopleCount", base.Fitness, weights=(
enu.EVA_WEIGHT_1,
enu.EVA_WEIGHT_2,
enu.EVA_WEIGHT_3,
enu.EVA_WEIGHT_4,
enu.EVA_WEIGHT_5,
enu.EVA_WEIGHT_6,
enu.EVA_WEIGHT_7,
enu.EVA_WEIGHT_8,
enu.EVA_WEIGHT_9,
enu.EVA_WEIGHT_10,
enu.EVA_WEIGHT_11,
enu.EVA_WEIGHT_12,
enu.EVA_WEIGHT_13,
enu.EVA_WEIGHT_14,
enu.EVA_WEIGHT_15,
enu.EVA_WEIGHT_16
))
"""
creator.create("Individual", list, fitness=creator.FitnessPeopleCount)
toolbox = base.Toolbox()
toolbox.register("map", futures.map)
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, enu.INDIVIDUAL_NUM)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", eva.evalShift)
# 交叉関数を定義(二点交叉)
toolbox.register("mate", tools.cxTwoPoint)
# 変異関数を定義(ビット反転、変異確率が5%)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
# 選択関数を定義(トーナメント選択、tournsizeはトーナメントの数)
toolbox.register("select", tools.selTournament, tournsize=enu.TOURN_SIZE)
if __name__ == '__main__':
# 初期集団を生成する
pop = toolbox.population(n=enu.NUM)
# 交差確率、突然変異確>率、進化計算のループ回数
CXPB, MUTPB, NGEN = enu.CROSS_PROBABIRTY, enu.MULTATION_PROBABIRTY, enu.LOOP_NUM
print("進化開始")
# 初期集団の個体を評価する
print ("初期集団の個体数:%i" % len(pop))
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses): # zipは複数変数の同時ループ
# 適合性をセットする
ind.fitness.values = fit
print(" %i の個体を評価" % len(pop))
# 進化計算開始
for g in range(NGEN):
print("-- %i 世代 --" % g)
print("CXPB:%lf MUTPB:%lf" % (CXPB, MUTPB))
# 選択
# 次世代の個体群を選択
offspring = toolbox.select(pop, len(pop))
# 個体群のクローンを生成
offspring = list(map(toolbox.clone, offspring))
# 選択した個体群に交差と突然変異を適応する
# 交叉
# 偶数番目と奇数番目の個体を取り出して交差
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
# 交叉された個体の適合度を削除する
del child1.fitness.values
del child2.fitness.values
# 変異
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# 適合度が計算されていない個体を集めて適合度を計算
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# 収束するように確率を調整
CXPB = CXPB * enu.CROSS_LOSS
MUTPB = MUTPB * enu.MULTATION_LOSS
print(" %i の個体を評価" % len(invalid_ind))
# 次世代群をoffspringにする
pop[:] = offspring
# すべての個体の適合度を配列にする
index = 1
for v in ind.fitness.values:
fits = [v for ind in pop]
length = len(pop)
mean = sum(fits) / length
sum2 = sum(x*x for x in fits)
std = abs(sum2 / length - mean**2)**0.5
print("* パラメータ%d") % index
print(" Min %s" % min(fits))
print(" Max %s" % max(fits))
print(" Avg %s" % mean)
print(" Std %s" % std)
index += 1
print("-- 進化終了 --")
best_ind = tools.selBest(pop, 1)[0]
print("最も優れていた個体: %s, %s" % (best_ind, best_ind.fitness.values))
shift = shi.Shift(best_ind)
print("-- 出力 --")
shift.print_csv()
#shift.print_tsv()
|
# -*- coding: utf-8 -*-
import random
from scoop import futures
from deap import base
from deap import creator
from deap import tools
from deap import cma
import Enum as enu
import Employee as emp
import Shift as shi
import EvalShift as eva
"""
deap setting
"""
# 評価関数のFit率の重要度(小さい値の方が重要視される)
creator.create("FitnessPeopleCount", base.Fitness, weights=(-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0))
"""
creator.create("FitnessPeopleCount", base.Fitness, weights=(
enu.EVA_WEIGHT_1,
enu.EVA_WEIGHT_2,
enu.EVA_WEIGHT_3,
enu.EVA_WEIGHT_4,
enu.EVA_WEIGHT_5,
enu.EVA_WEIGHT_6,
enu.EVA_WEIGHT_7,
enu.EVA_WEIGHT_8,
enu.EVA_WEIGHT_9,
enu.EVA_WEIGHT_10,
enu.EVA_WEIGHT_11,
enu.EVA_WEIGHT_12,
enu.EVA_WEIGHT_13,
enu.EVA_WEIGHT_14,
enu.EVA_WEIGHT_15,
enu.EVA_WEIGHT_16
))
"""
creator.create("Individual", list, fitness=creator.FitnessPeopleCount)
toolbox = base.Toolbox()
toolbox.register("map", futures.map)
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, enu.INDIVIDUAL_NUM)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", eva.evalShift)
# 交叉関数を定義(二点交叉)
toolbox.register("mate", tools.cxTwoPoint)
# 変異関数を定義(ビット反転、変異確率が5%)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
# Selection operator: tournament selection with tournaments of TOURN_SIZE.
toolbox.register("select", tools.selTournament, tournsize=enu.TOURN_SIZE)
if __name__ == '__main__':
    # Build the initial population.
    pop = toolbox.population(n=enu.NUM)
    # Crossover probability, mutation probability, number of generations.
    CXPB, MUTPB, NGEN = enu.CROSS_PROBABIRTY, enu.MULTATION_PROBABIRTY, enu.LOOP_NUM
    print("進化開始")
    # Evaluate every individual in the initial population.
    print("初期集団の個体数:%i" % len(pop))
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        # Attach the computed fitness to each individual.
        ind.fitness.values = fit
    print(" %i の個体を評価" % len(pop))
    # Main evolution loop.
    for g in range(NGEN):
        print("-- %i 世代 --" % g)
        print("CXPB:%lf MUTPB:%lf" % (CXPB, MUTPB))
        # Selection: choose the next generation, then clone so the genetic
        # operators below do not mutate the originals still held in `pop`.
        offspring = toolbox.select(pop, len(pop))
        offspring = list(map(toolbox.clone, offspring))
        # Crossover: pair even-indexed individuals with odd-indexed ones.
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # Mated children must be re-evaluated, so drop their fitness.
                del child1.fitness.values
                del child2.fitness.values
        # Mutation.
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Re-evaluate only the individuals whose fitness was invalidated above.
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Decay both probabilities so the search converges over time.
        CXPB = CXPB * enu.CROSS_LOSS
        MUTPB = MUTPB * enu.MULTATION_LOSS
        print(" %i の個体を評価" % len(invalid_ind))
        # The offspring become the next generation.
        pop[:] = offspring
        # Per-objective statistics across the whole population.
        # BUG FIX: the original wrote `fits = [v for ind in pop]`, which only
        # repeats a single leftover value, and `print("* パラメータ%d") % index`,
        # which applies % to print()'s return value (None) and raises TypeError.
        num_objectives = len(pop[0].fitness.values)
        for index in range(1, num_objectives + 1):
            fits = [ind.fitness.values[index - 1] for ind in pop]
            length = len(pop)
            mean = sum(fits) / length
            sum2 = sum(x*x for x in fits)
            std = abs(sum2 / length - mean**2)**0.5
            print("* パラメータ%d" % index)
            print(" Min %s" % min(fits))
            print(" Max %s" % max(fits))
            print(" Avg %s" % mean)
            print(" Std %s" % std)
    print("-- 進化終了 --")
    best_ind = tools.selBest(pop, 1)[0]
    print("最も優れていた個体: %s, %s" % (best_ind, best_ind.fitness.values))
    # Convert the best individual into a shift table and emit it as CSV.
    shift = shi.Shift(best_ind)
    print("-- 出力 --")
    shift.print_csv()
    #shift.print_tsv()
|
ja
| 0.956138
|
# -*- coding: utf-8 -*- deap setting # 評価関数のFit率の重要度(小さい値の方が重要視される) creator.create("FitnessPeopleCount", base.Fitness, weights=( enu.EVA_WEIGHT_1, enu.EVA_WEIGHT_2, enu.EVA_WEIGHT_3, enu.EVA_WEIGHT_4, enu.EVA_WEIGHT_5, enu.EVA_WEIGHT_6, enu.EVA_WEIGHT_7, enu.EVA_WEIGHT_8, enu.EVA_WEIGHT_9, enu.EVA_WEIGHT_10, enu.EVA_WEIGHT_11, enu.EVA_WEIGHT_12, enu.EVA_WEIGHT_13, enu.EVA_WEIGHT_14, enu.EVA_WEIGHT_15, enu.EVA_WEIGHT_16 )) # 交叉関数を定義(二点交叉) # 変異関数を定義(ビット反転、変異確率が5%) # 選択関数を定義(トーナメント選択、tournsizeはトーナメントの数) # 初期集団を生成する # 交差確率、突然変異確>率、進化計算のループ回数 # 初期集団の個体を評価する # zipは複数変数の同時ループ # 適合性をセットする # 進化計算開始 # 選択 # 次世代の個体群を選択 # 個体群のクローンを生成 # 選択した個体群に交差と突然変異を適応する # 交叉 # 偶数番目と奇数番目の個体を取り出して交差 # 交叉された個体の適合度を削除する # 変異 # 適合度が計算されていない個体を集めて適合度を計算 # 収束するように確率を調整 # 次世代群をoffspringにする # すべての個体の適合度を配列にする #shift.print_tsv()
| 1.775223
| 2
|
Leetcode/0515. Find Largest Value in Each Tree Row/0515.py
|
Next-Gen-UI/Code-Dynamics
| 0
|
6625931
|
<filename>Leetcode/0515. Find Largest Value in Each Tree Row/0515.py
class Solution:
    def largestValues(self, root: Optional[TreeNode]) -> List[int]:
        """Return the largest node value found on each level of the tree.

        Standard breadth-first traversal: each outer iteration consumes
        exactly one level of the queue and records that level's maximum.
        """
        if not root:
            return []
        row_maxima = []
        queue = deque([root])
        while queue:
            level_max = -math.inf
            for _ in range(len(queue)):
                node = queue.popleft()
                level_max = max(level_max, node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            row_maxima.append(level_max)
        return row_maxima
|
<filename>Leetcode/0515. Find Largest Value in Each Tree Row/0515.py
class Solution:
def largestValues(self, root: Optional[TreeNode]) -> List[int]:
if not root:
return []
ans = []
q = deque([root])
while q:
maxi = -math.inf
for _ in range(len(q)):
root = q.popleft()
maxi = max(maxi, root.val)
if root.left:
q.append(root.left)
if root.right:
q.append(root.right)
ans.append(maxi)
return ans
|
none
| 1
| 3.549036
| 4
|
|
ode/Development/emulatedClientApps/SituationalDataApp/tests/test_ODE_Multiple_Clients.py
|
OSADP/SEMI-ODE
| 0
|
6625932
|
<reponame>OSADP/SEMI-ODE
import unittest
import sys
import os
import json
import logging
import time
import datetime
from __init__ import LogThisTestCase
import testRunnerHelper
try:
import odeClient
except:
current_file_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.join(current_file_path, '..', '..', '..', 'apps', 'PythonSDK'))
try:
from odeClient import client, timehelpers, dataType, exceptions, restApi
from odeClient.response import BaseResponse
except:
print "Error Importing ODE Client. Please install the odeClient Package"
sys.exit(-1)
class ODE_Basic_Subscription_Multiple_Clients_Tests(unittest.TestCase):
    """Integration tests running two ODE clients concurrently.

    Each test spins up two independently configured async clients, lets them
    collect data for a fixed window, then validates the received records
    against each client's own configuration.
    """
    # LogThisTestCase presumably wires per-test logging — Python 2 metaclass
    # hook (this module uses Python 2 `print` statements).
    __metaclass__ = LogThisTestCase
    logger = logging.getLogger("ODE_Multiple_Clients_Tests")
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    # Two independent client configurations, read once at class-definition time.
    config1 = testRunnerHelper.parse_config_file(
        os.path.join('.', 'test_config_files', 'test_Basic_Multiple_Clients_config_1.ini'))
    config2 = testRunnerHelper.parse_config_file(
        os.path.join('.', 'test_config_files', 'test_Basic_Multiple_Clients_config_2.ini'))
    client1 = client2 = None
    def create_asyncClient_and_request(self,the_config,**kwargs):
        """Build an authenticated AsyncODEClient with its request attached.

        REQUEST_TYPE 'sub' produces a subscription request; anything else a
        query request. The client is returned un-started.
        """
        ode = client.ODEClient(the_config['HOST'])
        ode.get_token(the_config['USERNAME'], the_config['PASSWORD'])
        asycODE = client.AsyncODEClient(odeClient=ode)
        if the_config['REQUEST_TYPE'] in ['sub',]:
            request = testRunnerHelper.build_subscription_request(the_config, **kwargs)
        else:
            request = testRunnerHelper.build_query_request(the_config,**kwargs)
        self.logger.info("Request: %s", request.toJson())
        asycODE.setRequest(request)
        return asycODE
    def stop_client(self,client):
        # Invalidate the auth token first, then stop the async client.
        client.client.destroy_token()
        client.stop()
    def setUp(self):
        pass
    def tearDown(self):
        # Best-effort cleanup: the clients may never have been created if the
        # test failed early, so swallow any error here.
        try:
            self.stop_client(self.client1)
            self.stop_client(self.client2)
        except Exception as e:
            pass
        time.sleep(3)
    def test_Connection_to_Vehicle_Query_API(self):
        """Two concurrent vehicle *query* clients.

        Every 'veh' record each client receives must satisfy that client's
        own temporal and spatial constraints, and each client must receive
        at least 100 records.
        """
        dataType = 'veh'
        requestType = 'qry'
        self.config1['DATA'] = dataType
        self.config2['DATA'] = dataType
        self.config1['REQUEST_TYPE'] = requestType
        self.config2['REQUEST_TYPE'] = requestType
        self.client1 = self.create_asyncClient_and_request(self.config1)
        self.client2 = self.create_asyncClient_and_request(self.config2)
        start_time = datetime.datetime.utcnow()
        self.client1.start()
        self.client2.start()
        # Let both clients collect data for a fixed window (seconds).
        time_out = 180
        time.sleep(time_out)
        # TODO Assert Something
        # Assert Location and Date time is correct
        # assert
        msg1 = self.client1.get_all_messages()
        self.logger.info("Processing Client 1 Messages")
        for m in msg1:
            # self.logger.info( m.toJson() )
            if m.get_payload_type() in ('veh',):
                self.assertTrue(testRunnerHelper.validate_datetime(m.payload, self.config1))
                self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config1))
        msg2 = self.client2.get_all_messages()
        self.logger.info("Processing Client 2 Messages")
        for m in msg2:
            # self.logger.info( m.toJson())
            if m.get_payload_type() in ('veh',):
                self.assertTrue(testRunnerHelper.validate_datetime(m.payload, self.config2))
                self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config2))
        self.logger.info('Records Received: %d', len(msg1))
        self.logger.info('Records Received: %d', len(msg2))
        self.assertGreaterEqual(len(msg1), 100)
        self.assertGreaterEqual(len(msg2), 100)
        self.logger.info("All Records Validated against spatial and temporal parameters")
    def test_Connection_to_Vehicle_Subscription_API(self ):
        """Two concurrent vehicle *subscription* clients.

        Only the spatial constraint is validated here (live data, so the
        temporal window is open-ended); the minimum-count asserts are
        intentionally disabled below.
        """
        dataType = 'veh'
        requestType = 'sub'
        self.config1['DATA'] = dataType
        self.config2['DATA'] = dataType
        self.config1['REQUEST_TYPE'] = requestType
        self.config2['REQUEST_TYPE'] = requestType
        self.client1 = self.create_asyncClient_and_request(self.config1)
        self.client2 = self.create_asyncClient_and_request(self.config2)
        start_time = datetime.datetime.utcnow()
        self.client1.start()
        self.client2.start()
        # Let both subscriptions run for a fixed window (seconds).
        time_out = 180
        time.sleep(time_out)
        # TODO Assert Something
        # Assert Location and Date time is correct
        # assert
        msg1 = self.client1.get_all_messages()
        self.logger.info("Processing Client 1 Messages")
        for m in msg1:
            # self.logger.info( m.toJson() )
            if m.get_payload_type() in ('veh',):
                self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config1))
        msg2 = self.client2.get_all_messages()
        self.logger.info("Processing Client 2 Messages")
        for m in msg2:
            # self.logger.info( m.toJson())
            if m.get_payload_type() in ('veh',):
                self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config2))
        self.logger.info('Records Received: %d', len(msg1))
        self.logger.info('Records Received: %d', len(msg2))
        # self.assertGreaterEqual(len(msg1), 50)
        # self.assertGreaterEqual(len(msg2), 50)
        self.logger.info("All Records Validated against temporal parameters")
if __name__ == "__main__":
    unittest.main()  # discover and run every test in this module
|
import unittest
import sys
import os
import json
import logging
import time
import datetime
from __init__ import LogThisTestCase
import testRunnerHelper
try:
import odeClient
except:
current_file_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.join(current_file_path, '..', '..', '..', 'apps', 'PythonSDK'))
try:
from odeClient import client, timehelpers, dataType, exceptions, restApi
from odeClient.response import BaseResponse
except:
print "Error Importing ODE Client. Please install the odeClient Package"
sys.exit(-1)
class ODE_Basic_Subscription_Multiple_Clients_Tests(unittest.TestCase):
__metaclass__ = LogThisTestCase
logger = logging.getLogger("ODE_Multiple_Clients_Tests")
logger.propagate = False
logger.setLevel(logging.DEBUG)
config1 = testRunnerHelper.parse_config_file(
os.path.join('.', 'test_config_files', 'test_Basic_Multiple_Clients_config_1.ini'))
config2 = testRunnerHelper.parse_config_file(
os.path.join('.', 'test_config_files', 'test_Basic_Multiple_Clients_config_2.ini'))
client1 = client2 = None
def create_asyncClient_and_request(self,the_config,**kwargs):
ode = client.ODEClient(the_config['HOST'])
ode.get_token(the_config['USERNAME'], the_config['PASSWORD'])
asycODE = client.AsyncODEClient(odeClient=ode)
if the_config['REQUEST_TYPE'] in ['sub',]:
request = testRunnerHelper.build_subscription_request(the_config, **kwargs)
else:
request = testRunnerHelper.build_query_request(the_config,**kwargs)
self.logger.info("Request: %s", request.toJson())
asycODE.setRequest(request)
return asycODE
def stop_client(self,client):
client.client.destroy_token()
client.stop()
def setUp(self):
pass
def tearDown(self):
try:
self.stop_client(self.client1)
self.stop_client(self.client2)
except Exception as e:
pass
time.sleep(3)
def test_Connection_to_Vehicle_Query_API(self):
dataType = 'veh'
requestType = 'qry'
self.config1['DATA'] = dataType
self.config2['DATA'] = dataType
self.config1['REQUEST_TYPE'] = requestType
self.config2['REQUEST_TYPE'] = requestType
self.client1 = self.create_asyncClient_and_request(self.config1)
self.client2 = self.create_asyncClient_and_request(self.config2)
start_time = datetime.datetime.utcnow()
self.client1.start()
self.client2.start()
time_out = 180
time.sleep(time_out)
# TODO Assert Something
# Assert Location and Date time is correct
# assert
msg1 = self.client1.get_all_messages()
self.logger.info("Processing Client 1 Messages")
for m in msg1:
# self.logger.info( m.toJson() )
if m.get_payload_type() in ('veh',):
self.assertTrue(testRunnerHelper.validate_datetime(m.payload, self.config1))
self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config1))
msg2 = self.client2.get_all_messages()
self.logger.info("Processing Client 2 Messages")
for m in msg2:
# self.logger.info( m.toJson())
if m.get_payload_type() in ('veh',):
self.assertTrue(testRunnerHelper.validate_datetime(m.payload, self.config2))
self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config2))
self.logger.info('Records Received: %d', len(msg1))
self.logger.info('Records Received: %d', len(msg2))
self.assertGreaterEqual(len(msg1), 100)
self.assertGreaterEqual(len(msg2), 100)
self.logger.info("All Records Validated against spatial and temporal parameters")
def test_Connection_to_Vehicle_Subscription_API(self ):
dataType = 'veh'
requestType = 'sub'
self.config1['DATA'] = dataType
self.config2['DATA'] = dataType
self.config1['REQUEST_TYPE'] = requestType
self.config2['REQUEST_TYPE'] = requestType
self.client1 = self.create_asyncClient_and_request(self.config1)
self.client2 = self.create_asyncClient_and_request(self.config2)
start_time = datetime.datetime.utcnow()
self.client1.start()
self.client2.start()
time_out = 180
time.sleep(time_out)
# TODO Assert Something
# Assert Location and Date time is correct
# assert
msg1 = self.client1.get_all_messages()
self.logger.info("Processing Client 1 Messages")
for m in msg1:
# self.logger.info( m.toJson() )
if m.get_payload_type() in ('veh',):
self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config1))
msg2 = self.client2.get_all_messages()
self.logger.info("Processing Client 2 Messages")
for m in msg2:
# self.logger.info( m.toJson())
if m.get_payload_type() in ('veh',):
self.assertTrue(testRunnerHelper.validate_location(m.payload, self.config2))
self.logger.info('Records Received: %d', len(msg1))
self.logger.info('Records Received: %d', len(msg2))
# self.assertGreaterEqual(len(msg1), 50)
# self.assertGreaterEqual(len(msg2), 50)
self.logger.info("All Records Validated against temporal parameters")
if __name__ == "__main__":
unittest.main() # run all Test
|
en
| 0.265533
|
# TODO Assert Something # Assert Location and Date time is correct # assert # self.logger.info( m.toJson() ) # self.logger.info( m.toJson()) # TODO Assert Something # Assert Location and Date time is correct # assert # self.logger.info( m.toJson() ) # self.logger.info( m.toJson()) # self.assertGreaterEqual(len(msg1), 50) # self.assertGreaterEqual(len(msg2), 50) # run all Test
| 2.238812
| 2
|
powerdown_startup.py
|
Wish1991/Python
| 1
|
6625933
|
# Script Name : powerdown_startup.py
# Author : <NAME>
# Created : 05th January 2012
# Last Modified : 21th September 2017
# Version : 1.0
# Modifications :
# Description : This goes through the server list and pings the machine, if it's up it will load the putty session, if its not it will notify you.
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
def windows():  # This is the function to run if it detects the OS is windows.
    """Ping each host in startup_list.txt; load a PuTTY session for each one
    that answers and log the outcome either way.

    Fixes over the original: the log file, server list and the NUL sink are
    now closed deterministically (the originals leaked the handles), and the
    trailing newline is stripped from each server name before it is embedded
    in the ping / putty command lines.
    """
    with open("server_startup_" + strftime("%Y-%m-%d") + ".log", "a") as f:
        with open("startup_list.txt", "r") as servers:
            for server in servers:  # one host name per line
                server = server.strip()  # drop the trailing newline
                if not server:
                    continue  # ignore blank lines
                ret = subprocess.call(
                    "ping -n 3 %s" % server,
                    shell=True,
                    stdout=subprocess.DEVNULL,  # discard ping output without leaking a handle
                    stderr=subprocess.STDOUT,
                )  # Ping the servers in turn
                if ret == 0:  # the host answered
                    f.write(
                        "%s: is alive, loading PuTTY session" % server + "\n"
                    )
                    subprocess.Popen(("putty -load " + server))  # Load the putty session
                else:
                    f.write(
                        "%s : did not respond" % server + "\n"
                    )
def linux():
    """Ping each host in startup_list.txt; open an ssh session to each one
    that answers and log the outcome either way.

    Fixes over the original: the log file, server list and the /dev/null sink
    are closed deterministically (the originals leaked the handles), and each
    server name is stripped once before being used in the ping command line.
    """
    with open("server_startup_" + strftime("%Y-%m-%d") + ".log", "a") as f:
        with open("startup_list.txt") as servers:
            for server in servers:  # one host name per line
                server = server.strip()  # drop the trailing newline
                if not server:
                    continue  # ignore blank lines
                ret = subprocess.call(
                    "ping -c 3 %s" % server,
                    shell=True,
                    stdout=subprocess.DEVNULL,  # discard ping output without leaking a handle
                    stderr=subprocess.STDOUT,
                )  # Ping the servers in turn
                if ret == 0:  # the host answered
                    f.write("%s: is alive" % server + "\n")
                    subprocess.Popen(["ssh", server])
                else:
                    f.write("%s: did not respond" % server + "\n")
# End of the functions
# Start of the Main Program
# Dispatch on the OS flavour Python reports: POSIX gets the ssh variant,
# the Windows family gets the PuTTY variant.
_handlers = {"posix": linux, "nt": windows, "dos": windows, "ce": windows}
_handler = _handlers.get(os.name)
if _handler is not None:
    _handler()
else:
    print("Not supported")
|
# Script Name : powerdown_startup.py
# Author : <NAME>
# Created : 05th January 2012
# Last Modified : 21th September 2017
# Version : 1.0
# Modifications :
# Description : This goes through the server list and pings the machine, if it's up it will load the putty session, if its not it will notify you.
import os # Load the Library Module
import subprocess # Load the Library Module
from time import strftime # Load just the strftime Module from Time
def windows(): # This is the function to run if it detects the OS is windows.
f = open("server_startup_" + strftime("%Y-%m-%d") + ".log", "a") # Open the logfile
for server in open(
"startup_list.txt", "r"
): # Read the list of servers from the list
ret = subprocess.call(
"ping -n 3 %s" % server,
shell=True,
stdout=open("NUL", "w"),
stderr=subprocess.STDOUT,
) # Ping the servers in turn
if ret == 0: # If you get a response.
f.write(
"%s: is alive, loading PuTTY session" % server.strip() + "\n"
) # Write out to the logfile
subprocess.Popen(("putty -load " + server)) # Load the putty session
else:
f.write(
"%s : did not respond" % server.strip() + "\n"
) # Write to the logfile if the server is down
def linux():
f = open("server_startup_" + strftime("%Y-%m-%d") + ".log", "a") # Open the logfile
for server in open("startup_list.txt"): # Read the list of servers from the list
ret = subprocess.call(
"ping -c 3 %s" % server,
shell=True,
stdout=open("/dev/null", "w"),
stderr=subprocess.STDOUT,
) # Ping the servers in turn
if ret == 0: # If you get a response.
f.write("%s: is alive" % server.strip() + "\n") # Print a message
subprocess.Popen(["ssh", server.strip()])
else:
f.write("%s: did not respond" % server.strip() + "\n")
# End of the functions
# Start of the Main Program
if os.name == "posix": # If the OS is linux...
linux() # Call the linux function
elif os.name in ("nt", "dos", "ce"): # If the OS is Windows...
windows() # Call the windows function
else:
print("Not supported")
|
en
| 0.743449
|
# Script Name : powerdown_startup.py # Author : <NAME> # Created : 05th January 2012 # Last Modified : 21th September 2017 # Version : 1.0 # Modifications : # Description : This goes through the server list and pings the machine, if it's up it will load the putty session, if its not it will notify you. # Load the Library Module # Load the Library Module # Load just the strftime Module from Time # This is the function to run if it detects the OS is windows. # Open the logfile # Read the list of servers from the list # Ping the servers in turn # If you get a response. # Write out to the logfile # Load the putty session # Write to the logfile if the server is down # Open the logfile # Read the list of servers from the list # Ping the servers in turn # If you get a response. # Print a message # End of the functions # Start of the Main Program # If the OS is linux... # Call the linux function # If the OS is Windows... # Call the windows function
| 3.166847
| 3
|
2021.4/runs/benchmark/bionetwork/cobra_models/bionetwork.py
|
CIDARLAB/genetic-circuit-partitioning
| 1
|
6625934
|
<filename>2021.4/runs/benchmark/bionetwork/cobra_models/bionetwork.py
'''
--------------------------------------------------------------------------------
Description:
Roadmap:
Written by <NAME> <<EMAIL>>, DAMP Lab 2020
--------------------------------------------------------------------------------
'''
import cobra
import matplotlib.pyplot as plt
import networkx as nx
import tqdm
# Build a directed reaction graph from the SBML model: every reactant of a
# reaction gets a directed edge to every product of that reaction.
# BUG FIX: the original left this construction commented out, so `graph`
# was undefined and nx.kamada_kawai_layout(graph) below raised NameError.
graph = nx.DiGraph()
input_model = cobra.io.read_sbml_model("e_coli_core.xml")
for reaction_object in input_model.reactions:
    for reactant in reaction_object.reactants:
        for product in reaction_object.products:
            graph.add_edge(reactant, product)

### save to edgelist (optional; kept disabled as in the original)
# with open('DAG.edgelist', 'w') as f_out:
#     for edge in graph.edges():
#         f_out.write(str(edge[0]) + ' ' + str(edge[1]) + ' ' + '{' + '}' + '\n')

# Layout and plot the metabolite graph, then persist it as a PDF.
pos = nx.kamada_kawai_layout(graph)
plt.figure(num=None, figsize=(8,8), dpi=80)
nx.draw(
    graph,
    pos=pos,
    horizontalalignment='left',
    verticalalignment='bottom',
    node_color='coral'
)
plt.savefig('./core/DAG.pdf')
plt.show()
print(f'The Number of Nodes is {len(graph.nodes)}')
print(f'The Number of Edges is {len(graph.edges)}')
|
<filename>2021.4/runs/benchmark/bionetwork/cobra_models/bionetwork.py
'''
--------------------------------------------------------------------------------
Description:
Roadmap:
Written by <NAME> <<EMAIL>>, DAMP Lab 2020
--------------------------------------------------------------------------------
'''
import cobra
import matplotlib.pyplot as plt
import networkx as nx
import tqdm
# graph = nx.DiGraph()
# input_model = cobra.io.read_sbml_model("e_coli_core.xml")
# for reaction_object in input_model.reactions:
# name = reaction_object.id
# metabolites = reaction_object.metabolites
# reactants = reaction_object.reactants
# products = reaction_object.products
# for reactant in reactants:
# for product in products:
# graph.add_edge(reactant, product)
### save to edgelist
# f_out = open('DAG.edgelist', 'w')
# for edge in list(graph.edges()):
# f_out.write(str(edge[0]) + ' ' + str(edge[1]) + ' ' + '{'+'}' + '\n')
pos = nx.kamada_kawai_layout(graph)
plt.figure(num=None, figsize=(8,8), dpi=80)
nx.draw(
graph,
pos=pos,
horizontalalignment='left',
verticalalignment='bottom',
node_color='coral'
)
plt.savefig('./core/DAG.pdf')
plt.show()
print(f'The Number of Nodes is {len(graph.nodes)}')
print(f'The Number of Edges is {len(graph.edges)}')
|
en
| 0.449962
|
-------------------------------------------------------------------------------- Description: Roadmap: Written by <NAME> <<EMAIL>>, DAMP Lab 2020 -------------------------------------------------------------------------------- # graph = nx.DiGraph() # input_model = cobra.io.read_sbml_model("e_coli_core.xml") # for reaction_object in input_model.reactions: # name = reaction_object.id # metabolites = reaction_object.metabolites # reactants = reaction_object.reactants # products = reaction_object.products # for reactant in reactants: # for product in products: # graph.add_edge(reactant, product) ### save to edgelist # f_out = open('DAG.edgelist', 'w') # for edge in list(graph.edges()): # f_out.write(str(edge[0]) + ' ' + str(edge[1]) + ' ' + '{'+'}' + '\n')
| 2.154191
| 2
|
updater.py
|
The-Hacker894/LOG-A-PING
| 0
|
6625935
|
<filename>updater.py
#
#
#Made by https://github.com/iraizo
#
#
import requests
import sys
import time
import subprocess
import config
from git import Repo
# Compare the remote version against the local version.txt and, if the
# remote one differs, clone the repository to a user-chosen path.
url = str(config.versionurl)
r = requests.get(url)
cache = r.text  # remote version string, with dots (e.g. "1.2.3")
# BUG FIX: the original called .strip() and discarded the result (strings
# are immutable), so stray whitespace/newlines broke the int() conversions.
currentversion = cache.replace(".", "").strip()  # dots removed -> "123"
try:
    # Read the local version.txt so it can be compared to the remote one.
    # 'with' closes the handle (the original leaked it).
    with open("version.txt", "r") as file:
        cache2 = file.read()
except FileNotFoundError:  # no local version.txt to compare against
    print("File version.txt not found.")
    time.sleep(2)
    print("Exiting..")
    time.sleep(2)
    sys.exit()
localversion = cache2.replace(".", "").strip()  # dots removed
if int(currentversion) != int(localversion):  # a different version is available
    print("update is available")
    path = input("put in the path where you want to download it: ")  # asking for path
    print("Downloading update..")
    Repo.clone_from(config.repourl, path)
else:
    print("No update found.")
    print("Youre good to go.")
|
<filename>updater.py
#
#
#Made by https://github.com/iraizo
#
#
import requests
import sys
import time
import subprocess
import config
from git import Repo
url = str(config.versionurl)
r = requests.get(url)
cache = r.text # version with dots
currentversion = cache.replace(".", "") # version without dots
try:
# getting local version.txt to compare it /read it
file = open("version.txt", "r")
cache2 = file.read()
cache2.strip()
except (FileNotFoundError): # if version.txt isnt found
print("File version.txt not found.")
time.sleep(2)
print("Exiting..")
time.sleep(2)
sys.exit()
localversion = cache2.replace(".", "") # removing dots
localversion.strip()
currentversion.strip()
if int(currentversion) != int(localversion): # checking if new version is available
print("update is available")
path = input("put in the path where you want to download it: ") # asking for path
print("Downloading update..")
Repo.clone_from(config.repourl, path)
else:
print("No update found.")
print("Youre good to go.")
|
en
| 0.830052
|
# # #Made by https://github.com/iraizo # # # version with dots # version without dots # getting local version.txt to compare it /read it # if version.txt isnt found # removing dots # checking if new version is available # asking for path
| 3.097164
| 3
|
crypto/frank/src/client.py
|
cclauss/fbctf-2019-challenges
| 213
|
6625936
|
<gh_stars>100-1000
import binascii
import os
import socket
from collections import defaultdict
import crypto
class RemoteServer(object):
    """Line-oriented TCP client for the message server.

    Commands are sent as single b"cmd arg\\n" lines; replies are either a
    single line or a sequence of lines terminated by the literal b"done".
    All payloads are bytes.
    """
    def __init__(self, host, port):
        # One persistent TCP connection is used for the whole session.
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.connect((host, port))
    def _get_line_str(self):
        """Read byte-by-byte until '\\n'; return the line without it."""
        out = b''
        while True:
            out += self._sock.recv(1)
            if out[-1] == ord("\n"):
                return out[0:-1]
    def _get_lines_until_done(self):
        """Collect stripped reply lines until the sentinel b"done"."""
        lines = []
        while True:
            line = self._get_line_str().strip()
            if line == b"done":
                return lines
            lines.append(line)
    def _send_str(self, cmd, arg):
        # Wire format: b"<cmd> <arg>\n".
        self._sock.sendall(b"%s %s\n" % (cmd, arg))
    def register(self, pubkey):
        """Register a hex-encoded public key; returns the server's reply
        (used by callers as the assigned user id)."""
        self._sock.sendall(b"%s\n" % pubkey)
        return self._get_line_str()
    def put_msg(self, msg):
        """Store a blob on the server; returns the reply line (message id)."""
        self._send_str(b"put", msg)
        return self._get_line_str()
    def get_msg(self, mid):
        """Fetch the blob previously stored under message id `mid`."""
        self._send_str(b"get", mid)
        return self._get_line_str()
    def get_user(self, user):
        """Return the reply to `key <user>` — the user's registered key."""
        self._send_str(b"key", user)
        return self._get_line_str()
    def list_users(self):
        """Return all `list` reply lines (one user per line)."""
        self._send_str(b"list", b"")
        return self._get_lines_until_done()
    def send(self, _, user, msg):
        """Deliver `msg` to `user`'s mailbox.

        The first parameter is ignored — presumably the server identifies
        the sender from the connection itself; TODO confirm server-side.
        """
        self._send_str(b"send %s" % user, msg)
        return self._get_line_str()
    def recv(self, _):
        """Drain our mailbox; callers parse each line as b"who ts msg fbtag".

        The parameter is ignored (see send()).
        """
        self._send_str(b"recv", b"")
        return self._get_lines_until_done()
    def report(self, _, who, ts, ctxt, fbtag, msg):
        """Submit a `report` command with all fields space-separated.

        The first parameter is ignored (see send()).
        """
        self._sock.sendall(b"report %s %s %s %s %s\n" % (
            who,
            ts,
            ctxt,
            fbtag,
            msg,
        ))
        return self._get_line_str()
class Client(object):
    """High-level messaging client layered on a RemoteServer.

    On construction it generates an RSA key pair and registers the public
    key; messages are encrypted in two layers by the `crypto` module (an
    inner symmetric layer stored on the server, and an outer per-recipient
    layer carrying the key material).
    """
    def __init__(self, server):
        self.server = server
        self._priv_key = crypto.generate_rsa_key()
        # Register our hex-encoded public key; the reply is our user id.
        self.uid = self.server.register(
            binascii.hexlify(
                crypto.get_pubkey_bytes(
                    self._priv_key.public_key()
                )
            )
        )
        # Every received message, grouped by sender, kept for later report().
        self._all_messages = defaultdict(list)
    def list(self):
        # NOTE: shadows the builtin `list` name on instances of this class.
        return self.server.list_users()
    def send(self, msg, *users):
        """Encrypt `msg` once, store it server-side, then fan the key
        material out to every recipient in `users`."""
        km, cm = crypto.encrypt_inner(msg)
        mid = self.server.put_msg(binascii.hexlify(cm))
        hcm = crypto.hash(cm)
        for user in users:
            self._send_ctxt(mid, km, hcm, user)
    def _send_ctxt(self, mid, km, hcm, user):
        # Outer layer: (message id + inner key + ciphertext hash) encrypted
        # under the recipient's registered public key; `com` is whatever
        # second value encrypt_outer produces (appended to the ciphertext).
        out_msg = mid + km + hcm
        pubkey = binascii.unhexlify(self.server.get_user(user))
        ctxt, com = crypto.encrypt_outer(out_msg, pubkey)
        out = self.server.send(self.uid, user, binascii.hexlify(ctxt + com))
        assert out == b"sent", out
    def recv(self):
        """Fetch, decrypt and verify all pending messages.

        Returns a list of (sender, message id, plaintext) tuples and records
        each message in _all_messages so it can be report()ed later.
        """
        lines = self.server.recv(self.uid)
        msgs = []
        for line in lines:
            # Each mailbox line is: sender, timestamp, hex ctxt, hex fbtag.
            who, ts, msg, fbtag = line.split(b" ")
            msgs.append(
                (who, int(ts), binascii.unhexlify(msg), binascii.unhexlify(fbtag))
            )
        out = []
        for (who, ts, ctxt, fbtag) in msgs:
            msg = crypto.decrypt_outer(ctxt, self._priv_key)
            (mid, km, hcm, _) = crypto.split_outer_message(msg)
            cm = binascii.unhexlify(self.server.get_msg(mid))
            # Integrity check: the stored ciphertext must match the hash
            # that travelled inside the outer envelope.
            assert crypto.hash(cm) == hcm, "bad message hash"
            m = crypto.decrypt_inner(km, cm)
            self._all_messages[who].append((mid, ts, ctxt, msg, fbtag))
            out.append((who, mid, m))
        return out
    def report(self, who, mid):
        """Report the previously received message (who, mid) to the server,
        forwarding its ciphertext, tag and decrypted outer envelope.

        Raises IndexError if no such message was received.
        """
        (_, ts, ctxt, msg, fbtag) = [
            x for x in self._all_messages[who] if x[0] == mid
        ][0]
        return self.server.report(
            self.uid,
            who,
            str(ts).encode('utf-8'),
            binascii.hexlify(ctxt),
            binascii.hexlify(fbtag),
            binascii.hexlify(msg),
        )
|
import binascii
import os
import socket
from collections import defaultdict
import crypto
class RemoteServer(object):
def __init__(self, host, port):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((host, port))
def _get_line_str(self):
out = b''
while True:
out += self._sock.recv(1)
if out[-1] == ord("\n"):
return out[0:-1]
def _get_lines_until_done(self):
lines = []
while True:
line = self._get_line_str().strip()
if line == b"done":
return lines
lines.append(line)
def _send_str(self, cmd, arg):
self._sock.sendall(b"%s %s\n" % (cmd, arg))
def register(self, pubkey):
self._sock.sendall(b"%s\n" % pubkey)
return self._get_line_str()
def put_msg(self, msg):
self._send_str(b"put", msg)
return self._get_line_str()
def get_msg(self, mid):
self._send_str(b"get", mid)
return self._get_line_str()
def get_user(self, user):
self._send_str(b"key", user)
return self._get_line_str()
def list_users(self):
self._send_str(b"list", b"")
return self._get_lines_until_done()
def send(self, _, user, msg):
self._send_str(b"send %s" % user, msg)
return self._get_line_str()
def recv(self, _):
self._send_str(b"recv", b"")
return self._get_lines_until_done()
def report(self, _, who, ts, ctxt, fbtag, msg):
self._sock.sendall(b"report %s %s %s %s %s\n" % (
who,
ts,
ctxt,
fbtag,
msg,
))
return self._get_line_str()
class Client(object):
def __init__(self, server):
self.server = server
self._priv_key = crypto.generate_rsa_key()
self.uid = self.server.register(
binascii.hexlify(
crypto.get_pubkey_bytes(
self._priv_key.public_key()
)
)
)
self._all_messages = defaultdict(list)
def list(self):
return self.server.list_users()
def send(self, msg, *users):
km, cm = crypto.encrypt_inner(msg)
mid = self.server.put_msg(binascii.hexlify(cm))
hcm = crypto.hash(cm)
for user in users:
self._send_ctxt(mid, km, hcm, user)
def _send_ctxt(self, mid, km, hcm, user):
out_msg = mid + km + hcm
pubkey = binascii.unhexlify(self.server.get_user(user))
ctxt, com = crypto.encrypt_outer(out_msg, pubkey)
out = self.server.send(self.uid, user, binascii.hexlify(ctxt + com))
assert out == b"sent", out
def recv(self):
lines = self.server.recv(self.uid)
msgs = []
for line in lines:
who, ts, msg, fbtag = line.split(b" ")
msgs.append(
(who, int(ts), binascii.unhexlify(msg), binascii.unhexlify(fbtag))
)
out = []
for (who, ts, ctxt, fbtag) in msgs:
msg = crypto.decrypt_outer(ctxt, self._priv_key)
(mid, km, hcm, _) = crypto.split_outer_message(msg)
cm = binascii.unhexlify(self.server.get_msg(mid))
assert crypto.hash(cm) == hcm, "bad message hash"
m = crypto.decrypt_inner(km, cm)
self._all_messages[who].append((mid, ts, ctxt, msg, fbtag))
out.append((who, mid, m))
return out
def report(self, who, mid):
(_, ts, ctxt, msg, fbtag) = [
x for x in self._all_messages[who] if x[0] == mid
][0]
return self.server.report(
self.uid,
who,
str(ts).encode('utf-8'),
binascii.hexlify(ctxt),
binascii.hexlify(fbtag),
binascii.hexlify(msg),
)
|
none
| 1
| 3.062675
| 3
|
|
pyleecan/Methods/Geometry/Arc1/reverse.py
|
IrakozeFD/pyleecan
| 95
|
6625937
|
<reponame>IrakozeFD/pyleecan<filename>pyleecan/Methods/Geometry/Arc1/reverse.py
def reverse(self):
    """Reverse the travel direction of the arc in place.

    Swaps the begin and end points, negates the radius, and toggles the
    trigonometric (counter-clockwise) orientation flag so the reversed arc
    traces the same geometry in the opposite direction.

    Parameters
    ----------
    self : Arc1
        An Arc1 object

    Returns
    -------
    None
    """
    self.begin, self.end = self.end, self.begin
    self.radius = -self.radius
    self.is_trigo_direction = not self.is_trigo_direction
|
def reverse(self):
"""Reverse the begin and end point of the Line
Parameters
----------
self : Arc1
An Arc1 object
Returns
-------
"""
end = self.end
self.end = self.begin
self.begin = end
self.radius = -self.radius
self.is_trigo_direction = not self.is_trigo_direction
|
en
| 0.357807
|
Reverse the begin and end point of the Line Parameters ---------- self : Arc1 An Arc1 object Returns -------
| 3.917638
| 4
|
Neural Machine Translation/search/__init__.py
|
jtianesq/protein-prediction-nmt-evo
| 0
|
6625938
|
<filename>Neural Machine Translation/search/__init__.py<gh_stars>0
# __init__.py
from .beam import beam
from utils import flatten, pack_sequence_as
__all__ = ["beam", "select_nbest"]
# nested: a nested structure of shape batch * dim
# indices: indices to select
def select_nbest(nested, indices):
    """Select entries along the batch dimension of a possibly nested structure.

    nested: a nested structure (list/tuple) of batch * dim items, or a single
            indexable item
    indices: the indices to select from each leaf
    """
    if isinstance(nested, (list, tuple)):
        # Flatten, index every leaf, and rebuild the original structure.
        picked = [leaf[indices] for leaf in flatten(nested)]
        return pack_sequence_as(nested, picked)
    return nested[indices]
|
<filename>Neural Machine Translation/search/__init__.py<gh_stars>0
# __init__.py
from .beam import beam
from utils import flatten, pack_sequence_as
__all__ = ["beam", "select_nbest"]
# nested: a nested structure of shape batch * dim
# indices: indices to select
def select_nbest(nested, indices):
if not isinstance(nested, (list, tuple)):
return nested[indices]
flat_list = flatten(nested)
selected_list = [item[indices] for item in flat_list]
return pack_sequence_as(nested, selected_list)
|
en
| 0.645207
|
# __init__.py # nested: a nested structure of shape batch * dim # indices: indices to select
| 2.446321
| 2
|
examples/excelExpr.py
|
james-emerton/pyparsing
| 2
|
6625939
|
# excelExpr.py
#
# Copyright 2010, <NAME>
#
# A partial implementation of a parser of Excel formula expressions.
#
from pyparsing import (CaselessKeyword, Suppress, Word, alphas,
alphanums, nums, Optional, Group, oneOf, Forward, Regex,
infixNotation, opAssoc, dblQuotedString, delimitedList,
Combine, Literal, QuotedString, ParserElement, pyparsing_common)
# Packrat parsing memoizes intermediate parse results — a large speedup for
# a recursive grammar like this one.
ParserElement.enablePackrat()
# Punctuation (suppressed from results) and literal anchor markers.
EQ,LPAR,RPAR,COLON,COMMA = map(Suppress, '=():,')
EXCL, DOLLAR = map(Literal,"!$")
# A sheet name is a bare word or a single-quoted string ('' escapes a quote).
sheetRef = Word(alphas, alphanums) | QuotedString("'",escQuote="''")
# Column (1-2 letters) and row (digits) references, each optionally
# anchored as absolute with '$'.
colRef = Optional(DOLLAR) + Word(alphas,max=2)
rowRef = Optional(DOLLAR) + Word(nums)
# A full cell reference: [sheet!]col row, e.g. Sheet1!$A$7.
cellRef = Combine(Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") +
                    rowRef("row")))
# A range start:end, a single cell, or a bare name.
cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
                | cellRef | Word(alphas,alphanums))
# Forward-declared so function arguments can recursively contain expressions.
expr = Forward()
COMPARISON_OP = oneOf("< = > >= <= != <>")
condExpr = expr + COMPARISON_OP + expr
# if(condition, value_if_true, value_if_false); '-' after the keyword makes
# a malformed IF raise an error instead of silently backtracking.
ifFunc = (CaselessKeyword("if") -
          LPAR +
          Group(condExpr)("condition") +
          COMMA + Group(expr)("if_true") +
          COMMA + Group(expr)("if_false") + RPAR)
# Factory for variadic statistical functions: name(arg[, arg...]).
statFunc = lambda name : Group(CaselessKeyword(name) + Group(LPAR + delimitedList(expr) + RPAR))
sumFunc = statFunc("sum")
minFunc = statFunc("min")
maxFunc = statFunc("max")
aveFunc = statFunc("ave")
funcCall = ifFunc | sumFunc | minFunc | maxFunc | aveFunc
# Arithmetic with conventional precedence: * / bind tighter than + -.
multOp = oneOf("* /")
addOp = oneOf("+ -")
numericLiteral = pyparsing_common.number
operand = numericLiteral | funcCall | cellRange | cellRef
arithExpr = infixNotation(operand,
    [
    (multOp, 2, opAssoc.LEFT),
    (addOp, 2, opAssoc.LEFT),
    ])
# Text expressions: quoted strings and cells joined with '&' (concatenation).
textOperand = dblQuotedString | cellRef
textExpr = infixNotation(textOperand,
    [
    ('&', 2, opAssoc.LEFT),
    ])
# Close the recursion: an expression is arithmetic or text.
expr << (arithExpr | textExpr)
# Smoke-test the grammar against a handful of representative formulas.
(EQ + expr).runTests("""\
=3*A7+5
=3*Sheet1!$A$7+5
=3*'Sheet 1'!$A$7+5"
=3*'O''Reilly''s sheet'!$A$7+5
=if(Sum(A1:A25)>42,Min(B1:B25),if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25)))
=sum(a1:a25,10,min(b1,c2,d3))
=if("T"&a2="TTime", "Ready", "Not ready")
""")
|
# excelExpr.py
#
# Copyright 2010, <NAME>
#
# A partial implementation of a parser of Excel formula expressions.
#
# Grammar for a subset of Excel formula syntax, built with pyparsing.
from pyparsing import (CaselessKeyword, Suppress, Word, alphas,
    alphanums, nums, Optional, Group, oneOf, Forward, Regex,
    infixNotation, opAssoc, dblQuotedString, delimitedList,
    Combine, Literal, QuotedString, ParserElement, pyparsing_common)
# packrat memoization speeds up the recursive infixNotation grammar
ParserElement.enablePackrat()
# punctuation tokens; Suppress'ed tokens do not appear in parse results
EQ,LPAR,RPAR,COLON,COMMA = map(Suppress, '=():,')
EXCL, DOLLAR = map(Literal,"!$")
# sheet name: a bare word, or single-quoted with '' as the escaped quote
sheetRef = Word(alphas, alphanums) | QuotedString("'",escQuote="''")
# column (1-2 letters) and row (digits), each optionally $-anchored
colRef = Optional(DOLLAR) + Word(alphas,max=2)
rowRef = Optional(DOLLAR) + Word(nums)
# a full cell reference, e.g. A7, $A$7, Sheet1!$A$7
cellRef = Combine(Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") +
                  rowRef("row")))
# a range (A1:B5), a single cell, or a bare name
cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
                | cellRef | Word(alphas,alphanums))
expr = Forward()  # forward-declared so expressions can nest inside calls
COMPARISON_OP = oneOf("< = > >= <= != <>")
condExpr = expr + COMPARISON_OP + expr
# if(condition, value_if_true, value_if_false);
# '-' (And.ErrorStop) makes a syntax error after "if(" fail fast
ifFunc = (CaselessKeyword("if") -
          LPAR +
          Group(condExpr)("condition") +
          COMMA + Group(expr)("if_true") +
          COMMA + Group(expr)("if_false") + RPAR)
# factory for variadic aggregate functions: name(arg, arg, ...)
statFunc = lambda name : Group(CaselessKeyword(name) + Group(LPAR + delimitedList(expr) + RPAR))
sumFunc = statFunc("sum")
minFunc = statFunc("min")
maxFunc = statFunc("max")
aveFunc = statFunc("ave")
funcCall = ifFunc | sumFunc | minFunc | maxFunc | aveFunc
multOp = oneOf("* /")
addOp = oneOf("+ -")
numericLiteral = pyparsing_common.number
operand = numericLiteral | funcCall | cellRange | cellRef
# arithmetic with standard precedence: * and / bind tighter than + and -
arithExpr = infixNotation(operand,
    [
    (multOp, 2, opAssoc.LEFT),
    (addOp, 2, opAssoc.LEFT),
    ])
# text concatenation with '&'
textOperand = dblQuotedString | cellRef
textExpr = infixNotation(textOperand,
    [
    ('&', 2, opAssoc.LEFT),
    ])
expr << (arithExpr | textExpr)
# smoke tests; NOTE(review): the third case ends with a stray '"' and is
# presumably meant to demonstrate a parse failure
(EQ + expr).runTests("""\
=3*A7+5
=3*Sheet1!$A$7+5
=3*'Sheet 1'!$A$7+5"
=3*'O''Reilly''s sheet'!$A$7+5
=if(Sum(A1:A25)>42,Min(B1:B25),if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25)))
=sum(a1:a25,10,min(b1,c2,d3))
=if("T"&a2="TTime", "Ready", "Not ready")
""")
|
en
| 0.555142
|
# excelExpr.py # # Copyright 2010, <NAME> # # A partial implementation of a parser of Excel formula expressions. # \
=3*A7+5
=3*Sheet1!$A$7+5
=3*'Sheet 1'!$A$7+5"
=3*'O''Reilly''s sheet'!$A$7+5
=if(Sum(A1:A25)>42,Min(B1:B25),if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25)))
=sum(a1:a25,10,min(b1,c2,d3))
=if("T"&a2="TTime", "Ready", "Not ready")
| 2.723086
| 3
|
code/Experiments/neon-master/neon/backends/nervanacpu.py
|
matthijsvk/convNets
| 53
|
6625940
|
# ----------------------------------------------------------------------------
# Copyright 2014-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Our CPU based backend interface and tensor data structure. Our implementation
wraps :mod:`numpy` ndarray and related operations
"""
from __future__ import division
from builtins import object, round, str, zip
import numpy as np
import logging
import time
from neon.backends.backend import Tensor, Backend, OpTreeNode, OpCollection
from neon.backends.layer_cpu import ConvLayer, DeconvLayer, PoolLayer
from neon.util.compat import xrange
_none_slice = slice(None, None, None)
logger = logging.getLogger(__name__)
# TODO: enable this flag to find numerical problems
# np.seterr(all='raise')
class CPUTensor(Tensor):
    """
    The n-dimensional array data structure that resides in host memory,
    and is meant to be manipulated on the CPU.  Wraps a `numpy.ndarray`.

    Arguments:
        backend (Backend): backend that created this tensor.
        shape (tuple, optional): shape of the tensor; padded with trailing
                                 singleton dims up to ``_min_dims``.
        dtype (numpy.dtype, optional): underlying data type of the elements.
        ary (data array, optional): optionally it can be instantiated with
                                    a data array
        name (str, optional): tensor name.
        persist_values (bool, optional): If set to True (the default), the
                                         values assigned to this Tensor will
                                         persist across multiple begin and end
                                         calls.  Setting to False may provide
                                         a performance increase if values do
                                         not need to be maintained across such
                                         calls
        base (CPUTensor, optional): the tensor this one is a view of; kept
                                    alive so the underlying buffer persists.

    See also:
        :class:`NervanaCPU` class
    """
    _tensor = None

    def __init__(self,
                 backend,
                 shape=None,
                 dtype=np.float32,
                 ary=None,
                 name=None,
                 persist_values=True,
                 base=None):

        super(CPUTensor, self).__init__(backend, shape, dtype, name,
                                        persist_values)

        # supported dtypes
        assert dtype in (np.float16, np.float32, np.float64, np.uint8, np.int8,
                         np.uint16, np.int16, np.uint32, np.int32)

        dtype = np.dtype(dtype)

        # type(...) != ndarray (not isinstance) is deliberate: ndarray
        # subclasses are converted to plain ndarrays via np.array below
        if type(ary) != np.ndarray:
            self._tensor = np.array(ary, dtype)
        elif ary.dtype != dtype:
            self._tensor = ary.astype(dtype)
        else:
            self._tensor = ary

        # pad trailing singleton dims so every tensor has >= _min_dims dims
        while self._tensor.ndim < self._min_dims:
            self._tensor = self._tensor.reshape(self._tensor.shape + (1, ))
        if shape is not None and len(shape) < self._min_dims:
            self.shape = shape + (1, )*(self._min_dims - len(shape))
        else:
            self.shape = self._tensor.shape

        # normalize shape entries to plain python ints, compute total size
        shape_ = []
        size = 1
        for dim in self.shape:
            if int(dim) != dim:
                raise TypeError('shape dims must be integer values [%s]' % str(dim))
            dim = int(dim)
            shape_.append(dim)
            size *= dim
        self.shape = tuple(shape_)
        self.size = size
        self.base = base
        self.dtype = dtype
        self.is_contiguous = self._tensor.flags.c_contiguous

    def __str__(self):
        """
        Returns a string representation of this Tensor.

        Returns:
            str: the representation.
        """
        # report the id of the buffer owner so shared views are recognizable
        if self._tensor.base is not None:
            base_id = id(self._tensor.base)
        else:
            base_id = id(self._tensor)
        return ("CPUTensor(base 0x%x) name:%s shape:%s dtype:%s strides:%s"
                " is_c_contiguous:%s" % (base_id, self.name, self.shape,
                                         self.dtype, self._tensor.strides,
                                         self._tensor.flags.c_contiguous))

    def __repr__(self):
        """
        Returns a more unambiguous string representation of the Tensor.

        Returns:
            str: the representation.
        """
        return self.__str__()

    def __len__(self):
        """
        Return the size of the leading dimension of self.
        """
        if len(self.shape):
            return self.shape[0]
        else:
            return 0

    def __setitem__(self, key, value):
        """
        Assign the specified value to a subset of elements found via slice
        style indexing along each dimension. e.g. A[5:10, :] = 4.5.
        Each slice consists of start_idx:stop_idx:step_size triplets.  If
        step_size isn't specified it defaults to 1.  If start_idx isn't
        specified it defaults to 0.  If stop_idx isn't specified it defaults
        to the total number of elements along that dimension.  As such a
        slice value of ':' allows one to select all elements along that
        dimension.

        Arguments:
            key (int, slice, tuple): indices of each dimension's slice.
            value (numeric array, CPUTensor): values to be assigned to the
                                              extracted element subset.  If an
                                              array it should be the same
                                              shape as what key indexes (or be
                                              broadcastable as such).
        """
        self.__getitem__(key)._assign(value)
        return self

    def __getitem__(self, key):
        """
        Extract a subset view of the items via slice style indexing
        along each dimension. e.g. A[5:10, :].  To be consistent with GPU
        Tensors, size-1 axes are removed from the view's shape unless
        needed to maintain 2 dimensions.

        Arguments:
            key (int, slice, tuple): indices of each dimension's slice.

        Returns:
            CPUTensor: view of self corresponding to the subset items.
        """
        # speed up common case of [:]
        if not isinstance(key, tuple):
            if key == _none_slice:
                return self
            key = (key,)

        # convert scalar indices to length-1 slices so the result is always
        # a view (e.g. a[1,1] yields a Tensor, as on the GPU, not a scalar)
        key_list = list(key)
        for idx, k in enumerate(key):
            if type(k) is int:
                k = self.shape[idx] + k if k < 0 else k
                key_list[idx] = slice(k, k + 1, None)
        key = tuple(key_list)

        # squeeze size-1 axes while keeping at least 2 dims.
        # (fixed: the original mutated the list while iterating it and used
        # 'k is 1'; that could leave a stray singleton axis, e.g. (2,1,1,3))
        new_shape = list(self._tensor[key].shape)
        while len(new_shape) > 2 and 1 in new_shape:
            new_shape.remove(1)

        # return a view of the tensor
        return self.__class__(
            backend=self.backend,
            ary=self._tensor[key].reshape(new_shape),
            dtype=self._tensor.dtype,
            base=self)

    def _assign(self, value):
        """
        Assign an input value to the CPU tensor.  The NervanaCPU does
        clipping for int and uint types, when overflow happens.

        Arguments:
            value (CPUTensor, OpTreeNode, numeric): the value to be assigned.
        """
        if isinstance(value, (CPUTensor, OpTreeNode)):
            # route through the op-tree so backend semantics apply
            OpTreeNode.build("assign", self, value)
        elif isinstance(value, (int, float, np.ndarray)):
            self.set(value)
        else:
            raise TypeError("Invalid type for assignment: %s" % type(value))
        return self

    def set(self, value):
        """
        Wrap the value into NervanaCPU tensor.

        Arguments:
            value: Array or single input.  If it is array, check and convert
                   the dtype and shape.  If it is single value, broadcast to
                   the memory

        Returns:
            self
        """
        if isinstance(value, np.ndarray):
            # '!=' (value equality) instead of 'is not': two equal np.dtype
            # instances must not trigger a spurious astype copy
            if value.dtype != self.dtype:
                value = value.astype(self.dtype)
            assert value.size == self.size
            if value.ndim < self._min_dims:
                value = value.reshape(self.shape)

        self._tensor[:] = value
        return self

    def get(self):
        """
        Return a host copy of the underlying array.
        """
        return self._tensor.copy()

    def raw(self):
        """
        Access the raw buffer.

        Returns:
            pointer: A device specific pointer
        """
        return self._tensor.ctypes.data

    def asnumpyarray(self):
        """
        Deprecated.  Scheduled to be removed in 2.0.  Use get() instead.

        NOTE: unlike get(), this returns the live backing array, not a copy.
        """
        return self._tensor

    def take(self, indices, axis=None):
        """
        Select a subset of elements from an array across an axis.

        Arguments:
            indices (Tensor, numpy ndarray): indices of elements to select
            axis (int): axis across which to select the values

        Returns:
            Tensor: Tensor with selected values
        """
        if type(indices) == self.__class__:
            indices = indices._tensor

        # if indices are nx1 or 1xn, much of our code assumes these dims are
        # collapsed, hence the squeeze call.
        if type(indices) == np.ndarray:
            indices = indices.squeeze()

        new_shape = list(self.shape)
        new_shape[axis] = indices.size

        return self.__class__(
            backend=self.backend,
            ary=self._tensor.take(indices, axis).reshape(new_shape),
            dtype=self._tensor.dtype,
            base=self)

    def fill(self, value):
        """
        Assign specified value to each element of this CPUTensor.

        Arguments:
            value (numeric): The value to be assigned to each element.

        Return:
            CPUTensor: updated view of the data.
        """
        self._tensor.fill(value)
        return self

    def copy(self, a):
        """
        Construct and return a deep copy of the Tensor passed.

        Arguments:
            a (Tensor): the object to copy

        Returns:
            Tensor: new array object with the same values as input tensor
        """
        return self._assign(a)

    def copy_from(self, a):
        """
        Alias of copy.

        Arguments:
            a (Tensor): the object to copy

        Returns:
            Tensor: new array object with the same values as input tensor
        """
        return self._assign(a)

    def reshape(self, *shape):
        """
        Return a reshaped view (self when the shape is unchanged).
        """
        if isinstance(shape[0], (tuple, list)):
            shape = tuple(shape[0])
        if shape == self.shape:
            return self
        return self.__class__(
            backend=self.backend,
            ary=self._tensor.reshape(shape),
            dtype=self._tensor.dtype,
            base=self)

    @property
    def T(self):
        """
        Return a transposed view.
        For 2D tensor, will do a normal transpose.
        For 3D tensor, will keep the 0 dim, swap the 1 and 2 dimensions.
        """
        if len(self.shape) <= 2:
            ary = self._tensor.transpose()
        else:
            # support for batched dot:
            # preserve outer (batch) dimension but swap the inner dims
            ary = self._tensor.swapaxes(1, 2)
        return self.__class__(
            backend=self.backend,
            ary=ary,
            dtype=self._tensor.dtype,
            base=self)

    def transpose(self, out=None):
        """
        Return a transposed view of the data.  Alias of .T property.
        When out is given, assign the transpose into it instead.
        """
        if out:
            return OpTreeNode.build("assign", out, self.T)
        return self.T

    def share(self, shape, dtype=None, name=None):
        """
        Return a view: ary, where ary.size <= self.size.
        Allows easy sharing of temporary memory.
        This is mostly provided for compatibility -- dtype is ignored.
        """
        size = np.prod(shape)
        if size > self.size:
            raise ValueError("total size of new array must <= size of parent")
        ary = self._tensor.ravel()[:size].reshape(shape)
        return self.__class__(
            backend=self.backend,
            ary=ary,
            dtype=self._tensor.dtype,
            base=self)

    def hist(self, tag):
        """
        Compute a histogram of the current tensor values.

        Arguments:
            tag (string): Tag to identify the current state of the tensor,
                          useful for disambiguating multiple histograms of the
                          same tensor at different points in time.

        Returns:
            Tensor containing the histogram data.
        """
        nbins = self.backend.hist_bins
        offset = self.backend.hist_offset
        # bin edges are offset log2 magnitudes; the first bin is open-ended
        # to catch zeros/denormals (log2(0) == -inf)
        bins = np.arange(nbins + 1) + float(offset)
        bins[0] = -float('Inf')
        np_inp_log_abs = np.rint(
            np.log2(np.abs(self._tensor.astype(np.float32))))
        np_hist, edges = np.histogram(np_inp_log_abs, density=False, bins=bins)
        nc_hist = self.backend._hist_tensor(tag)._assign(np_hist)
        return nc_hist
class CustomNumpy(object):
    """Numpy arg-reductions that keep the reduced axis as a size-1 dim."""

    @staticmethod
    def argmax(x, axis=1, keepdims=True):
        """
        numpy argmax that always retains the reduced axis (length 1),
        mimicking a keepdims=True reduction.
        """
        return np.expand_dims(np.argmax(x, axis=axis), axis)

    @staticmethod
    def argmin(x, axis=1, keepdims=True):
        """
        numpy argmin that always retains the reduced axis (length 1),
        mimicking a keepdims=True reduction.
        """
        return np.expand_dims(np.argmin(x, axis=axis), axis)
def _assign_right_to_left(left, right):
    """In-place copy of ``right`` into the buffer held by ``left``."""
    left[slice(None)] = right


# Dispatch table mapping op-tree op names to numpy implementations.
# Unary entries take one ndarray, binary entries take two; reduction
# entries take the op dict first (to read its 'axis') then the operand.
numpy_call_dict = {
    # assign
    "assign": _assign_right_to_left,
    # zero_operand ops
    # unary ops
    "neg": lambda a: -a,
    "abs": lambda a: np.abs(a),
    "sgn": lambda a: np.sign(a),
    "sqrt": lambda a: np.sqrt(a),
    "sqr": lambda a: np.square(a),
    "exp": lambda a: np.exp(a),
    "log": lambda a: np.log(a),
    # clamp away from zero so the log never produces -inf
    "safelog": lambda a: np.log(np.maximum(a, np.exp(-50.))),
    "exp2": lambda a: np.exp2(a),
    "log2": lambda a: np.log2(a),
    "sig": lambda a: 1. / (1. + np.exp(-a)),
    "sig2": lambda a: 1. / (1. + np.exp2(-a)),
    "tanh": lambda a: np.tanh(a),
    "tanh2": lambda a: (np.exp2(2. * a) - 1.) / (np.exp2(2. * a) + 1.),
    "transpose": lambda a: np.transpose(a),
    "rint": lambda a: np.rint(a),
    # binary ops
    "add": lambda a, b: a + b,
    "sub": lambda a, b: a - b,
    "mul": lambda a, b: a * b,
    "div": lambda a, b: a / b,
    "eq": lambda a, b: a == b,
    "ne": lambda a, b: a != b,
    "lt": lambda a, b: a < b,
    "le": lambda a, b: a <= b,
    "gt": lambda a, b: a > b,
    "ge": lambda a, b: a >= b,
    "pow": lambda a, b: np.power(a, b),
    "minimum": lambda a, b: np.minimum(a, b),
    "maximum": lambda a, b: np.maximum(a, b),
    "dot": lambda a, b: np.dot(a, b),
    # reduction ops (keepdims so results broadcast against the input)
    "sum": lambda op, a: np.sum(a, axis=op['axis'], keepdims=True),
    "max": lambda op, a: np.max(a, axis=op['axis'], keepdims=True),
    "min": lambda op, a: np.min(a, axis=op['axis'], keepdims=True),
    "argmax": lambda op, a: CustomNumpy.argmax(a, axis=op['axis'], keepdims=True),
    "argmin": lambda op, a: CustomNumpy.argmin(a, axis=op['axis'], keepdims=True),
}
class NervanaCPU(Backend):
"""
Sets up a :mod:`numpy` baseyd backend for matrix ops. By default, we use
32-bit element data types for any arrays constructed.
Attributes:
default_dtype (dtype): default element data type.
tensor_cls: underlying Tensor type. For CPU backend, it will be CPU tensor
See also:
:class:`CPUTensor`
"""
backend_name = 'cpu'
def __init__(self,
rng_seed=None,
default_dtype=np.float32,
hist_bins=64,
hist_offset=-48,
compat_mode=None,
# Ignored
num_devices=None,
stochastic_round=None,
device_id=None,
deterministic=None
):
if default_dtype not in [np.float16, np.float32, np.float64]:
logger.error('Default data type for nervanagpu '
'backend must be float16, 32 or 64')
raise ValueError
super(NervanaCPU, self).__init__(rng_seed, default_dtype, compat_mode=compat_mode)
# ensure an optimized BLAS is present and warn if not
try:
if not any(x in str(np.__config__.blas_opt_info['libraries']).lower()
for x in ['openblas', 'atlas', 'mkl', 'accelerate']):
logger.warn("No accelerated BLAS libraries found, CPU "
"performance may suffer. Consider installing "
"one of openblas, Atlas, MKL, or vecLib")
except (AttributeError, KeyError):
logger.warn("Problems inferring BLAS info, CPU performance may "
"be suboptimal")
self.device_type = 0
self.device_id = 0
self.tensor_cls = CPUTensor
logger.info("Initialized NervanaCPU")
self.hist_bins, self.hist_offset = None, None
self.set_hist_buffers(hist_bins, hist_offset)
self.use_pinned_mem = False
def consume(self, buf_index, hostlist, devlist):
assert 0 <= buf_index < 2, 'Can only double buffer'
if devlist[buf_index] is None:
devlist[buf_index] = self.empty_like(
hostlist[buf_index].T, dtype=hostlist[buf_index].dtype
)
devlist[buf_index][:] = hostlist[buf_index].T
def set_hist_buffers(self, hist_bins, hist_offset):
if (hist_bins != self.hist_bins or hist_offset != self.hist_offset):
self.hist_bins = hist_bins
self.hist_offset = hist_offset
self.hist_max = 4096
self.hist_buf = self.empty((self.hist_max, hist_bins), dtype=np.int32)
self.hist_idx = 0
self.hist_map = dict()
    def gen_rng(self, seed=None):
        """
        Generate the random number generator on host.

        Arguments:
            seed (int): random number generator seed

        Returns:
            seeded numpy RNG
        """
        self.rng = np.random.RandomState(seed)
        # remember the freshly-seeded state so rng_reset() can restore it
        self.init_rng_state = self.rng_get_state()
        return self.rng
    def rng_set_state(self, state):
        """
        Set the RNG state for host RNG.

        Arguments:
            state (np.array): numpy random number state vector, as returned
                              by :py:meth:`rng_get_state`
        """
        self.rng.set_state(state)
    def rng_get_state(self):
        """
        Return the current state of the on-host RNG.

        Returns:
            tuple: the on-host RNG state (as returned by
                   numpy.random.RandomState.get_state)
        """
        return self.rng.get_state()
    def rng_reset(self):
        """
        Reset the random state to the state where the Backend is first
        initialized (the state captured by gen_rng).
        """
        self.rng_set_state(self.init_rng_state)
def fill_normal(self, ary, mean=0, stdv=1):
"""
Fill ary with normally distributed random numbers.
Arguments:
ary (Tensor): Tensor to fill with random values
mean (float): Mean value. Default 0
stdv (float): standard deviation value. Default 1
"""
ary[:] = np.random.standard_normal(ary.shape) * stdv + mean
    def execute(self, optree):
        """
        Execute the optree. Break optree into sub-optrees if necessary.

        Arguments:
            optree: (OpTreeNode): the OpTreeNode object that represents all
                                  the operations
        """
        # deal with onehot specially for now: it only appears in the form
        # "assign(tensor, onehot(idx))", which we decompose by hand
        if (len(optree) == 3 and isinstance(optree[2], OpTreeNode) and
                optree[2][0]['op'] == 'onehot'):
            assert optree[0]['op'] == 'assign'
            assert isinstance(optree[1], Tensor)
            # get the output buffer
            array_output = optree[1]._tensor
            # get the output shape and onehot representation length will be on
            # this axis
            numpy_axis = optree[2][0]['axis']
            numpy_ind0 = optree[2][0]['idx']._tensor.squeeze()
            numpy_ind_len = numpy_ind0.size
            numpy_ind1 = list(range(numpy_ind_len))
            # ind for indexing: row `numpy_axis` holds the hot positions,
            # the other row enumerates the samples
            numpy_ind = np.zeros((2, numpy_ind_len), dtype=np.int32)
            numpy_ind[numpy_axis] = numpy_ind0
            numpy_ind[1 - numpy_axis] = numpy_ind1
            array_output[:] = 0
            array_output[numpy_ind.tolist()] = 1
            return array_output

        # get post order stack
        postfix_stack = optree.traverse(list())

        # init compute stack
        compute_stack = []

        # iterate through postfix stack to compute result: operator nodes
        # pop their operands and push their result; operand nodes push
        # their raw numpy array (or scalar)
        for p in postfix_stack:
            if isinstance(p, dict):
                # TODO add rand and onehot here
                if p['op'] in OpCollection.unary_ops:
                    left = compute_stack.pop()
                    compute_stack.append(numpy_call_dict[p['op']](left))
                elif p['op'] in OpCollection.binary_ops:
                    right = compute_stack.pop()
                    left = compute_stack.pop()
                    compute_stack.append(numpy_call_dict[p['op']](left, right))
                elif p['op'] in OpCollection.reduction_ops:
                    left = compute_stack.pop()
                    # reduction lambdas take the op dict itself (for 'axis')
                    compute_stack.append(numpy_call_dict[p['op']](p, left))
                elif p['op'] in OpCollection.zero_operand_ops:
                    compute_stack.append(numpy_call_dict[p['op']](None))
                else:
                    raise NotImplementedError
            elif isinstance(p, CPUTensor):
                compute_stack.append(p._tensor)
            else:
                compute_stack.append(p)
        assert len(compute_stack) == 1
        # the root 'assign' has already written into its target; return the
        # first postfix element (note: not the value left on compute_stack)
        return postfix_stack[0]
def empty(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of the CPUTensor class without initializing
individual element values.
Arguments:
shape (int, list): The size of each dimension of the Tensor.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype if dtype is None else dtype
return self.tensor_cls(
backend=self,
ary=np.zeros(shape, dtype),
dtype=dtype,
name=name,
persist_values=persist_values)
def array(self, ary, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of the CPUTensor class setting each element
value to what is specified in ary.
Arguments:
ary (numpy.ndarray): The data structure containing element values
spread across a number of dimensions. Python
built-in types like ints and lists are
supported.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype if dtype is None else dtype
return self.tensor_cls(
backend=self,
ary=np.array(ary, dtype),
dtype=dtype,
name=name,
persist_values=persist_values)
def zeros(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of the CPUTensor class setting each element
value to 0.
Arguments:
shape (list of ints): The size of each dimension of the Tensor.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype if dtype is None else dtype
return self.tensor_cls(
backend=self,
ary=np.zeros(shape, dtype),
dtype=dtype,
name=name,
persist_values=persist_values)
def ones(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of the CPUTensor class setting each element
value to 1.
Arguments:
shape (list of ints): The size of each dimension of the Tensor.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype if dtype is None else dtype
return self.tensor_cls(
backend=self,
ary=np.ones(shape, dtype),
dtype=dtype,
name=name,
persist_values=persist_values)
def empty_like(self, ary, dtype=None, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
"""
dtype = self.default_dtype if dtype is None else dtype
return self.tensor_cls(
backend=self,
ary=np.zeros(ary.shape, dtype),
dtype=dtype,
name=name,
persist_values=persist_values)
def zeros_like(self, ary, dtype=None, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary and populating each element with a value of 0.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
"""
dtype = self.default_dtype if dtype is None else dtype
return self.tensor_cls(
backend=self,
ary=np.zeros(ary.shape, dtype),
dtype=dtype,
name=name,
persist_values=persist_values)
def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False, bsum=None):
"""
Doing following operations (* is dot product)
C = alpha * A * B + beta * C
C = alpha * A.T * B + beta * C
C = alpha * A * B.T + beta * C.
relu: if true applied before output (and prior to beta addition)
The operation will be short-circuited to: out <- alpha * left * right
if beta has value 0 (the default).
Arguments:
A, B (CPUTensor): input operands
C (CPUTensor): output
alpha (float): scale A*B term
beta (float): scale C term before sum
relu (bool): whether to apply ReLu before output
"""
# checking type and shape
assert A.dtype == B.dtype == C.dtype
assert A.shape[0] == C.shape[0]
assert B.shape[1] == C.shape[1]
assert A.shape[1] == B.shape[0]
# cleaner implementation, shall be equivalent to the one below
# if relu:
# C[:] = self.log(1. + self.exp(alpha * self.dot(A, B))) + beta * C
# else:
# C[:] = alpha * self.dot(A, B) + beta * C
if beta == 0:
if C._tensor.flags['C_CONTIGUOUS'] is not True:
tmp = np.empty(C.shape, dtype=C.dtype)
np.dot(A._tensor, B._tensor, tmp)
C._tensor[:] = tmp.copy()
else:
np.dot(A._tensor, B._tensor, C._tensor)
if relu:
self.Relu(C._tensor, C._tensor)
else:
np.multiply(C._tensor, beta, C._tensor)
tmp = np.empty(C.shape, dtype=C.dtype)
np.dot(A._tensor, B._tensor, tmp)
np.multiply(tmp, alpha, tmp)
if relu:
self.Relu(tmp, tmp)
np.add(C._tensor, tmp, C._tensor)
if bsum is not None:
bsum[:] = self.sum(C, 1)
return C
    def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
        """
        Doing following operations:
        1 For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C)
        2 For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C)
        3 For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C).

        Arguments:
            A, B (CPUTensor): input operands
            C (CPUTensor): output
            alpha, beta, relu: see usage in dot()
        """
        assert A.dtype == B.dtype == C.dtype
        # dima/dimb/dimc become 1 when the corresponding tensor carries a
        # leading batch dimension (is 3-d); used as index offsets below
        dima, dimb, dimc = 0, 0, 0
        # ldaz, ldbz, ldcz = 0, 0, 0  # commented for stylecheck
        batch_grid, batch_loops = 1, 1
        if len(A.shape) == 3:
            dima = 1
        if len(B.shape) == 3:
            dimb = 1
        assert dima or dimb, "Tensor A or B must have 3 dims to use batched_dot"
        if len(C.shape) == 3:
            dimc = 1
            batch_grid = C.shape[0]
            # batched inputs must share C's batch size
            assert not dima or A.shape[0] == batch_grid
            assert not dimb or B.shape[0] == batch_grid
        if dima:
            batch_loops = A.shape[0]
            assert not dimb or B.shape[0] == batch_loops
        elif dimb:
            batch_loops = B.shape[0]
            assert not dima or A.shape[0] == batch_loops
        # the non-batch dims must line up as an ordinary matrix product
        assert A.shape[0 + dima] == C.shape[0 + dimc]
        assert B.shape[1 + dimb] == C.shape[1 + dimc]
        assert A.shape[1 + dima] == B.shape[0 + dimb]
        # accumulate across the batch (update case) or fill per-batch
        # slices (fprop/bprop case)
        tmp = np.zeros(C.shape)
        for i in range(batch_loops):
            if dima:
                tmp += np.dot(A._tensor[i], B._tensor[i])
            else:
                tmp[i] = np.dot(A._tensor, B._tensor[i])
        np.multiply(tmp, alpha, tmp)
        if relu:
            # NOTE(review): self.Relu is defined elsewhere on the backend
            self.Relu(tmp, tmp)
        np.add(C._tensor * beta, tmp, C._tensor)
        return C
def xnor_compound_dot(self, A, B, C, beta=0.0, bsum=None):
"""
Performs XNOR GEMM
C = A * B
Arguments:
A (Tensor): left-hand side operand.
B (Tensor): right-hand side operand.
C (Tensor): output operand
"""
# checking type and shape
assert A.dtype == B.dtype == C.dtype
assert A.shape[0] == C.shape[0]
assert B.shape[1] == C.shape[1]
assert A.shape[1] == B.shape[0]
np.dot(A._tensor, B._tensor, C._tensor)
if bsum is not None:
bsum[:] = self.sum(C, 1)
return C
def copy_transpose(self, a, out, axes=None, repeat=1):
"""
Function to perform a fast copy transpose/dimshuffle operation.
Works just like numpy.transpose, but requires an output tensor argument.
"""
out._tensor[:] = np.transpose(a._tensor, axes).copy()
def make_binary_mask(self, out, keepthresh=0.5):
"""
Create a binary mask for dropout layers.
Arguments:
out (CPUTensor): Output tensor
keepthresh (float): fraction of ones
"""
out._tensor[:] = np.array(
self.rng.uniform(size=out._tensor.shape) < keepthresh,
dtype=out._tensor.dtype)
def conv_layer(self, dtype,
N, C, K,
D=1, H=1, W=1,
T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1):
"""
Create a new ConvLayer parameter object.
This then is passed as an argument to all the convolution operations.
N: Number of images in mini-batch
C: Number of input feature maps
K: Number of output feature maps
D: Depth of input image
H: Height of input image
W: Width of input image
T: Depth of filter kernel
R: Height of filter kernel
S: Width of filter kernel
padding: amount of zero-padding around the given edge
strides: factor to step the filters by in a given direction
dtype: need to know dtype to setup proper kernels and params.
bsum: calculate the sum along the batchnorm axis for fprop or bprop
outputs an fp32 tensor of size Kx1
"""
return ConvLayer(self, dtype, N, C, K, D, H, W, T, R, S,
pad_d, pad_h, pad_w, str_d, str_h, str_w)
def fprop_conv(self, layer, I, F, O,
X=None, bias=None, bsum=None,
alpha=1.0, beta=0.0,
relu=False, brelu=False, slope=0.0):
"""
Forward propagate the inputs of a convolutional network layer to
produce output.
Arguments:
layer: the conv layer as a parameter object
I (CPUTensor): inputs
F (CPUTensor): the weights (filters)
O (CPUTensor): outputs
Compounding Options:
X: tensor to use in bprop_relu or beta
can be same as O for beta accumulate (this is default when None)
should be same shape as O
bias: (K,1) tensor to use for adding bias to output
O += bias
bsum: (K,1) tensor to accumulate batch sum over (used in batchnorm or bprop_bias)
bsum = sum(O.reshape(K,-1), axis=1)
the sum operation is fully deterministic
alpha, beta:
O = alpha*O + beta*X
O = alpha*O + beta*O (if X==O)
relu, slope: boolean flag to apply:
O = max(O, 0) + beta*min(O, 0)
can be combined with bias (where bias is added first)
brelu, slope: boolean flag to apply:
O *= (X > 0) + beta*(X < 0)
can be combined with bsum tensor to output bprop_bias
"""
layer.xprop_conv(I, F, O, X, bias, bsum, alpha, beta, relu, brelu, slope)
def bprop_conv(self, layer, F, E, grad_I,
X=None, bias=None, bsum=None,
alpha=1.0, beta=0.0,
relu=False, brelu=False, slope=0.0):
"""
Backward propagate the error through a convolutional network layer.
Arguments:
layer: the conv layer as a parameter object
F (CPUTensor): the weights (filters)
E (CPUTensor): errors
grad_I (CPUTensor): gradient to inputs (output delta)
Compounding Options:
X: tensor to use in bprop_relu or beta
can be same as grad_I for beta accumulate (this is default when None)
should be same shape as grad_I
bias: (K,1) tensor to use for adding bias to output
grad_I += bias
bsum: (K,1) tensor to accumulate batch sum over (used in batchnorm or bprop_bias)
bsum = sum(grad_I.reshape(K,-1), axis=1)
the sum operation is fully deterministic
alpha, beta:
grad_I = alpha*grad_I + beta*X
grad_I = alpha*grad_I + beta*grad_I (if X==grad_I)
relu, slope: boolean flag to apply:
grad_I = max(grad_I, 0) + slope*min(grad_I, 0)
can be combined with bias (where bias is added first)
brelu, slope: boolean flag to apply:
grad_I *= (X > 0) + slope*(X < 0)
can be combined with bsum tensor to output bprop_bias
"""
layer.xprop_conv(E, F, grad_I, X, bias, bsum, alpha, beta, relu, brelu, slope,
backward=True)
def update_conv(self, layer, I, E, U, alpha=1.0, beta=0.0):
    """
    Compute the weight-gradient update for a convolutional layer.

    Arguments:
        layer: the conv layer parameter object
        I (CPUTensor): the inputs
        E (CPUTensor): the errors
        U (CPUTensor): the updates (written by the layer)
        alpha (float): linear scaling of the computed update
        beta (float): scale for accumulating into the existing U
    """
    # sanity-check that the tensors agree with the layer geometry
    for expected, tensor in ((layer.sizeI, I), (layer.sizeO, E), (layer.sizeF, U)):
        assert expected == tensor.size
    layer.update_conv(I, E, U, alpha, beta)
def deconv_layer(self, dtype,
                 N, C, K,
                 P, Q,
                 R=1, S=1,
                 pad_d=0, pad_h=0, pad_w=0,
                 str_d=1, str_h=1, str_w=1):
    """
    Create a new DeconvLayer parameter object.

    The returned object is then passed as an argument to all of the
    deconvolution operations.

    Note that for deconvolution, C is the *output* side and K the
    *input* side -- the reverse of the conv layer convention.

    Arguments:
        dtype: needed to set up the proper kernels and params
        N: Number of images in mini-batch
        C: Number of output feature maps
        K: Number of input feature maps
        P: Height of input
        Q: Width of input
        R: Height of filter kernel
        S: Width of filter kernel
        pad_d, pad_h, pad_w: amount of zero-padding around the given edge
        str_d, str_h, str_w: factor to step the filters by in a given direction
    """
    return DeconvLayer(self, dtype, N, C, K, P, Q, R, S,
                       pad_d, pad_h, pad_w, str_d, str_h, str_w)
def lrn_layer(self, dtype, N, C, D=1, H=1, W=1, J=1):
    """
    Create a new PoolLayer parameter object configured for local
    response normalization (LRN) across the channel axis.

    Arguments:
        dtype: needed to set up the proper kernels and params
        N: Number of images in mini-batch
        C: Number of input feature maps
        D, H, W: spatial dims of the input (leave at 1 to allow feature
            map pooling in the fc layers)
        J: size of the feature-map pooling window; must be odd

    Returns:
        PoolLayer: an 'lrn' pooling descriptor.
    """
    assert J % 2 == 1, "Only support odd LRN window size"
    # Only the channel axis is pooled: every other axis gets a 1-wide
    # window, zero padding and unit stride; the channel axis is padded
    # by half the window so the output size matches the input.
    return PoolLayer(self, dtype, 'lrn', N, C, D, H, W, J,
                     T=1, R=1, S=1,
                     pad_c=J // 2, pad_d=0, pad_h=0, pad_w=0,
                     str_c=1, str_d=1, str_h=1, str_w=1)
def fprop_lrn(self, layer, I, O, denom, alpha=None, beta=None, ascale=1, bpower=1):
    """
    Forward propagate an LRN (local response normalization) layer.

    Computes, per output element:
        denom = 1 + (ascale / J) * sum_window(I**2)
        O     = I * denom**(-bpower)

    Arguments:
        layer (PoolLayer): the 'lrn' pool layer object created by lrn_layer()
        I (Tensor): Input tensor.
        O (Tensor): output tensor.
        denom (Tensor): denominator tensor, stores the result of the squared
            pooling/contrast so bprop_lrn can reuse it
        alpha, beta: unused here; accepted for API compatibility
        ascale (float): scaling parameter (alpha) to multiply the pooled sum
            (1.25e-5 in AK)
        bpower (float): exponential parameter (beta) to raise denominator by
            (0.75 in AK)
    """
    assert layer.sizeI == I.size
    assert layer.sizeO == O.size

    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_I = I._tensor.reshape(layer.dimI)
    array_O = O._tensor.reshape(layer.dimO)  # _tensor to write to
    # although we can calculate directly into O, keeping denom around is useful for bprop
    array_d = denom._tensor.reshape(layer.dimO)  # _tensor to write to

    # walk every output location; each (k,m,p,q) pools a window of the input
    for k in range(K):
        sliceC, _ = layer.kSlice[k]
        _ascale = ascale / J  # window-normalized scale (loop-invariant)
        for m in range(M):
            sliceD, _ = layer.mSlice[m]
            for p in range(P):
                sliceH, _ = layer.pSlice[p]
                for q in range(Q):
                    sliceW, _ = layer.qSlice[q]
                    sliceI = array_I[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    array_d[k, m, p, q, :] = 1 + _ascale * np.sum(np.square(sliceI), axis=0)

    array_O[:] = array_I * np.power(array_d, -bpower)  # elementwise divide by denominator
def bprop_lrn(self, layer, I, O, E, delta, denom, alpha=None, beta=None, ascale=1, bpower=1):
    """
    Backward propagate an LRN (local response normalization) layer.

    Uses the denominator cached by fprop_lrn:
        delta = -2*bpower*(ascale/J) * I * sum_window(O*E/denom)
                + E * denom**(-bpower)

    Arguments:
        layer (PoolLayer): The pool layer object. Different backends have
                           different pool layers.
        I (Tensor): fprop input tensor.
        O (Tensor): fprop output tensor.
        E (Tensor): Error tensor.
        delta (Tensor): Gradient tensor (delta), written here.
        denom (Tensor): denominator tensor computed during fprop_lrn
        alpha, beta: unused here; accepted for API compatibility
        ascale (float): scaling parameter (alpha) to multiply the pooled sum
            (1.25e-5 in AK)
        bpower (float): exponential parameter (beta) to raise denominator by
            (0.75 in AK)
    """
    assert layer.sizeI == I.size
    assert layer.sizeO == E.size
    assert layer.sizeI == delta.size

    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_I = I._tensor.reshape(layer.dimI)
    array_E = E._tensor.reshape(layer.dimO)
    array_O = O._tensor.reshape(layer.dimO)
    array_delta = delta._tensor.reshape(layer.dimI)  # write to
    array_denom = denom._tensor.reshape(layer.dimO)

    for k in range(K):
        sliceC, _ = layer.kSlice[k]
        for m in range(M):
            sliceD, _ = layer.mSlice[m]
            for p in range(P):
                sliceH, _ = layer.pSlice[p]
                for q in range(Q):
                    sliceW, _ = layer.qSlice[q]
                    _O = array_O[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    _E = array_E[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    _den = array_denom[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    # temporarily store part of the derivative in here
                    array_delta[k, m, p, q, :] = np.sum(_O * _E / _den, axis=0)

    # combine the windowed sums with the direct term in one vectorized pass
    array_delta[:] = -2 * bpower * (ascale / float(J)) * array_delta * array_I + (
        array_E * np.power(array_denom, -bpower))
def pool_layer(self, dtype,
               op, N, C,
               D=1, H=1, W=1,
               J=1, T=1, R=1, S=1,
               pad_c=0, pad_d=0, pad_h=0, pad_w=0,
               str_c=None, str_d=None, str_h=None, str_w=None):
    """
    Create a new PoolLayer parameter object.

    The returned object is then passed as an argument to all pooling
    kernels.

    Arguments:
        dtype: needed to set up the proper kernels and params
        op: "max", "avg", "l2" pooling (currently bprop only supports
            max and avg, not l2)
        N: Number of images in mini-batch
        C: Number of input feature maps
        D, H, W: depth/height/width of the input image
        J: Size of feature map pooling window (maxout n_pieces)
        T, R, S: depth/height/width of the pooling window
        pad_*: amount of zero-padding around the given image or feature
            map edge
        str_*: factor to step the window by in a given direction
            (overlap allowed); None defaults to the window size,
            i.e. non-overlapping pooling

    Leave spatial dimensions at 1 to allow feature map pooling in the
    fc layers.
    """
    # strides default to the window extents (non-overlapping)
    str_c = J if str_c is None else str_c
    str_d = T if str_d is None else str_d
    str_h = R if str_h is None else str_h
    str_w = S if str_w is None else str_w
    return PoolLayer(self, dtype, op, N, C, D, H, W, J, T, R, S,
                     pad_c, pad_d, pad_h, pad_w, str_c, str_d, str_h, str_w)
def fprop_pool(self, layer, I, O, argmax=None, beta=0.0):
    """
    Forward propagate pooling layer.

    Arguments:
        layer (PoolLayer): The pool layer object, different backends have
                           different pool layers.
        I (Tensor): Input tensor.
        O (Tensor): output tensor.
        argmax (Tensor): tensor to store location of the maximum
                         (required when layer.op == "max")
        beta (float): scale for accumulating into the existing O
    """
    assert layer.sizeI == I.size
    assert layer.sizeO == O.size
    if layer.op == "max":
        assert layer.sizeO == argmax.size

    op = layer.op
    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_I = I._tensor.reshape(layer.dimI)
    array_O = O._tensor.reshape(layer.dimO)
    if op == "max":
        array_argmax = argmax._tensor.reshape(layer.dimO)

    # walk every output location; each (k,m,p,q) pools a window of the input
    for k in range(K):
        sliceC, _ = layer.kSlice[k]
        for m in range(M):
            sliceD, _ = layer.mSlice[m]
            for p in range(P):
                sliceH, _ = layer.pSlice[p]
                for q in range(Q):
                    sliceW, _ = layer.qSlice[q]
                    sliceI = array_I[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    if op == "max":
                        # remember which window element won, for bprop
                        array_argmax[k, m, p, q, :] = np.argmax(sliceI, axis=0)
                        array_O[k, m, p, q, :] = array_O[k, m, p, q, :] * beta + \
                            np.max(sliceI, axis=0)
                    elif op == "avg":
                        array_O[k, m, p, q, :] = array_O[k, m, p, q, :] * beta + \
                            np.mean(sliceI, axis=0)
                    elif op == "l2":
                        array_O[k, m, p, q, :] = array_O[k, m, p, q, :] * beta + \
                            np.sqrt(np.sum(np.square(sliceI), axis=0))
def bprop_pool(self, layer, I, O, argmax=None, alpha=1.0, beta=0.0):
    """
    Backward propagate pooling layer.

    Note the naming: here I holds the incoming *errors* (output-shaped)
    and O receives the *deltas* (input-shaped).

    Arguments:
        layer (PoolLayer): The pool layer object. Different backends have
                           different pool layers.
        I (Tensor): Input (error) tensor.
        O (Tensor): Output (delta) tensor.
        argmax (Tensor): tensor to store location of the maximum
                         (required when layer.op == "max")
        alpha (float): linear scaling (does not work for l2 pooling)
        beta (float): accumulation value into grad_I

    Raises:
        NotImplementedError: for the "l2" op (bprop unsupported here).
    """
    assert layer.sizeI == O.size
    assert layer.sizeO == I.size
    if layer.op == "max":
        assert layer.sizeO == argmax.size

    op = layer.op
    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_E = I._tensor.reshape(layer.dimO)
    array_E[:] = array_E * alpha          # NOTE: scales the caller's error tensor in place
    array_delta = O._tensor.reshape(layer.dimI)
    array_delta[:] = array_delta * beta   # beta-accumulate into existing deltas
    if op == "max":
        array_argmax = argmax._tensor.reshape(layer.dimO)

    for k in range(K):
        sliceC, clen = layer.kSlice[k]
        for m in range(M):
            sliceD, dlen = layer.mSlice[m]
            for p in range(P):
                sliceH, hlen = layer.pSlice[p]
                for q in range(Q):
                    sliceW, wlen = layer.qSlice[q]
                    patch_in = (sliceC, sliceD, sliceH, sliceW, slice(None))
                    patch_out = (k, m, p, q, slice(None))
                    sliceB = array_delta[patch_in].reshape((-1, N))
                    if op == "max":
                        # route the error only to the winning window element
                        max_n = array_argmax[patch_out]
                        sliceB[max_n, list(range(N))] += array_E[patch_out]
                    elif op == "avg":
                        # spread the error evenly over the window
                        sliceB += array_E[patch_out] * (1.0 / sliceB.shape[0])
                    else:
                        raise NotImplementedError
                    array_delta[patch_in] = sliceB.reshape((clen, dlen, hlen, wlen, N))
def _roipooling_slice(self, h, stride, H, roi_offset):
"""
Slicing for ROIPooling along one dimension.
h: is the index on the pooled map (output index)
stride:
H: the max of the input map
roi_offset: how far hstart is from 0
"""
hstart = int(np.floor(float(h) * stride))
hend = int(np.ceil(float(h + 1) * stride))
hstart = min(max(hstart + roi_offset, 0), H)
hend = min(max(hend + roi_offset, 0), H)
return slice(hstart, hend), hend - hstart
def roipooling_fprop(self, I, rois, O, argmax, roi_count, C, H, W,
                     pooled_height, pooled_width, spatial_scale):
    """
    Function to perform fprop of ROIPooling.

    Each ROI row is (img_idx, xmin, ymin, xmax, ymax) in image
    coordinates; coordinates are scaled by spatial_scale onto the
    feature map, and each ROI is max-pooled into a fixed
    pooled_height x pooled_width grid.

    Arguments:
        I (Tensor): (C, H, W, N) input feature maps
        rois (Tensor): (ROIs, 5)
        O (Tensor): (C, pooled_height, pooled_width, roi_count)
        argmax (Tensor): (C, pooled_height, pooled_width, roi_count),
            receives flat (h*W + w) indices of each max, for bprop
    """
    assert I.size == C * H * W * self.bsz,\
        "ROIPooling input feature map size do not match"
    assert O.size == argmax.size == C * pooled_height * pooled_width * roi_count,\
        "ROIPooling output shape do not match"
    assert rois.shape[1] == 5, "ROIs should be on the row dimension"
    assert rois.shape[0] == roi_count, "ROIs do not match with roi count"

    array_fm = I._tensor.reshape(C, H, W, self.bsz)
    array_rois = rois._tensor
    array_O = O._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_argmax = argmax._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_O[:] = 0
    array_argmax[:] = -1  # -1 marks "no input fell in this bin"

    # combine the feature map with ROIs
    for b_id in xrange(roi_count):
        [idx, xmin, ymin, xmax, ymax] = array_rois[b_id]
        # project ROI coordinates onto the feature map
        xmin = int(round(xmin * spatial_scale))
        xmax = int(round(xmax * spatial_scale))
        ymin = int(round(ymin * spatial_scale))
        ymax = int(round(ymax * spatial_scale))
        roi_width = max(xmax - xmin + 1, 1)
        roi_height = max(ymax - ymin + 1, 1)

        # input extent covered by one pooled output cell
        stride_h = float(roi_height) / float(pooled_height)
        stride_w = float(roi_width) / float(pooled_width)

        for h_out in xrange(pooled_height):
            sliceh, lenh = self._roipooling_slice(h_out, stride_h, H, ymin)
            if sliceh.stop <= sliceh.start:
                continue
            for w_out in xrange(pooled_width):
                slicew, lenw = self._roipooling_slice(w_out, stride_w, W, xmin)
                if slicew.stop <= slicew.start:
                    continue
                else:
                    array_I = array_fm[:, sliceh, slicew, int(idx)].reshape(C, -1)
                    array_O[:, h_out, w_out, b_id] = np.max(array_I, axis=1)
                    # get the max idx respect to feature_maps coordinates
                    max_idx_slice = np.unravel_index(np.argmax(array_I, axis=1), (lenh, lenw))
                    max_idx_slice_h = max_idx_slice[0] + sliceh.start
                    max_idx_slice_w = max_idx_slice[1] + slicew.start
                    max_idx_slice = max_idx_slice_h * W + max_idx_slice_w
                    array_argmax[:, h_out, w_out, b_id] = max_idx_slice
def roipooling_bprop(self, I, rois, O, argmax, roi_count, C, H, W,
                     pooled_height, pooled_width, spatial_scale):
    """
    Function to perform bprop of ROIPooling.

    Routes each incoming error back to the feature-map location that
    won the max during fprop (as recorded in argmax).

    Arguments:
        I (Tensor): input errors (C, pooled_height, pooled_width, roi_count)
        rois (Tensor): (ROIs, 5)
        O (Tensor): output deltas (C, H, W, N)
        argmax (Tensor): max args from the fprop
            (C, pooled_height, pooled_width, roi_count)
    """
    assert I.size == argmax.size == C * pooled_height * pooled_width * roi_count,\
        "ROIPooling bprop input size do not match"
    assert O.size == C * H * W * self.bsz,\
        "ROIPooling bprop output size do not match"
    assert rois.shape[1] == 5, "ROIs should be on the row dimension"
    assert rois.shape[0] == roi_count, "ROIs do not match with roi count"

    array_E = I._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_rois = rois._tensor
    array_delta = O._tensor.reshape(C, H, W, self.bsz)
    array_argmax = argmax._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_delta[:] = 0

    for b_id in xrange(roi_count):
        [idx, xmin, ymin, xmax, ymax] = array_rois[b_id]
        # project ROI coordinates onto the feature map (same as fprop)
        xmin = int(round(xmin * spatial_scale))
        xmax = int(round(xmax * spatial_scale))
        ymin = int(round(ymin * spatial_scale))
        ymax = int(round(ymax * spatial_scale))
        roi_width = max(xmax - xmin + 1, 1)
        roi_height = max(ymax - ymin + 1, 1)

        stride_h = float(roi_height) / float(pooled_height)
        stride_w = float(roi_width) / float(pooled_width)

        # iterate all the w, h (from feature map) that fall into this ROIs
        for w in range(xmin, xmax + 1):
            for h in range(ymin, ymax + 1):
                # range of pooled cells whose window contains (h, w)
                phstart = int(np.floor(float(h - ymin) / stride_h))
                phend = int(np.ceil(float(h - ymin + 1) / stride_h))
                pwstart = int(np.floor(float(w - xmin) / stride_w))
                pwend = int(np.ceil(float(w - xmin + 1) / stride_w))

                phstart = min(max(phstart, 0), pooled_height)
                phend = min(max(phend, 0), pooled_height)
                pwstart = min(max(pwstart, 0), pooled_width)
                pwend = min(max(pwend, 0), pooled_width)

                for ph in range(phstart, phend):
                    for pw in range(pwstart, pwend):
                        max_idx_tmp = array_argmax[:, ph, pw, b_id]
                        for c in range(C):
                            # only propagate to channels where (h, w) won the max
                            if max_idx_tmp[c] == (h * W + w):
                                array_delta[c, h, w, int(idx)] += array_E[c, ph, pw, b_id]
def nms(self, detections, threshold):
    """
    Perform non-maximal suppression on a set of detection boxes.

    Arguments:
        detections (Tensor): detection boxes (box_count, 5); each row is
            (x1, y1, x2, y2, score)
        threshold (float): IoU threshold; boxes overlapping a kept box
            by more than this are suppressed

    Returns:
        list: indices (into detections) of the boxes that are kept,
        in descending score order.
    """
    boxes = detections.get()
    x1, y1, x2, y2, scores = (boxes[:, i] for i in range(5))
    # +1 follows the original pixel-area convention
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)

    keep = []
    order = scores.argsort()[::-1]
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # intersection of the best box with every remaining box
        iw = np.maximum(0.0, np.minimum(x2[best], x2[rest]) -
                        np.maximum(x1[best], x1[rest]) + 1)
        ih = np.maximum(0.0, np.minimum(y2[best], y2[rest]) -
                        np.maximum(y1[best], y1[rest]) + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # survivors are the boxes whose overlap stays within threshold
        order = rest[np.where(iou <= threshold)[0]]
    return keep
def compound_fprop_bn(self, x, xsum, xvar, gmean, gvar, gamma, beta, y, eps, rho,
                      accumbeta=0.0, relu=False, binary=False):
    """
    Function to perform batch normalization forward pass. Included
    for API compatibility with GPU compound kernel call.

    Arguments:
        x (Tensor): Input from previous layer
        xsum (Tensor): Precomputed batch sum over PQN dimension;
            overwritten in place with the batch mean
        xvar (Tensor): Buffer for variance (computed in kernel)
        gmean (Tensor): global mean, updated in place with the moving average
        gvar (Tensor): global variance, updated in place with the moving average
        gamma (Tensor): scale parameter
        beta (Tensor): location parameter
        y (Tensor): normalized output
        eps (float): constant for numerical stability
        rho (float): exponential window averaging constant
        accumbeta (float): accepted for GPU API compatibility; not used
            by this CPU implementation
        relu (bool): accepted for GPU API compatibility; not used by
            this CPU implementation
        binary (bool): use shift-based (power-of-two) arithmetic
    """
    xvar[:] = self.var(x, axis=1, binary=binary)
    xsum[:] = xsum / x.shape[1]  # reuse xsum instead of computing xmean
    # moving-average update of the global statistics
    gmean[:] = gmean * rho + (1.0 - rho) * xsum
    gvar[:] = gvar * rho + (1.0 - rho) * xvar

    if binary:
        # shift-based normalization and scale (multiplies by powers of two)
        xhat = self.shift(x - xsum, 1.0 / self.sqrt(xvar + eps))
        outputs = y.reshape(xhat.shape)
        outputs[:] = self.shift(xhat, gamma) + beta
    else:
        xhat = (x - xsum) / self.sqrt(xvar + eps)
        outputs = y.reshape(xhat.shape)
        outputs[:] = xhat * gamma + beta
def compound_bprop_bn(self, delta_out, grad_gamma, grad_beta, delta_in, x, xsum, xvar,
                      gamma, eps, binary=False):
    """
    Function to perform batch normalization backward pass. Included
    for API compatibility with GPU compound kernel call.

    Arguments:
        delta_out (Tensor): Delta buffer to write out to
        grad_gamma (Tensor): Gradient w.r.t. gamma
        grad_beta (Tensor): Gradient w.r.t. beta
        delta_in (Tensor): Delta buffer to read from (incoming errors)
        x (Tensor): feedforward input
        xsum (Tensor): Batch mean over the PQN dimension
            (NOTE(review): as left by compound_fprop_bn this holds the
            mean, not the raw sum -- confirm callers pass the same buffer)
        xvar (Tensor): Batch variance
        gamma (Tensor): scale parameter
        eps (float): constant for numerical stability
        binary (bool): Binary shift based computations
    """
    if binary:
        # shift-based multiply (powers of two) for binary networks
        op = self.shift
    else:
        def multiply(left, right):
            return left * right
        op = multiply

    inv_v = 1.0 / self.sqrt(xvar + eps)
    xhat = op(x - xsum, inv_v)
    grad_gamma[:] = self.sum(xhat * delta_in, axis=1)
    grad_beta[:] = self.sum(delta_in, axis=1)
    # mean correction term, then chain rule through the normalization
    xtmp = (op(xhat, grad_gamma) + grad_beta) / float(x.shape[1])
    delta_out.reshape(delta_in.shape)[:] = op(op(delta_in - xtmp, gamma), inv_v)
def compound_bprop_lut(self, nin, inputs, error, error_t, dW, pad_idx, alpha=1.0, beta=0):
    """
    Backward propagate lookup table layer.

    For every distinct word id in the first row of ``inputs`` (except
    pad_idx), writes the summed error columns of its occurrences into
    the corresponding row of dW.  Rows of dW for word ids not present
    in this batch are left untouched (not accumulated into).

    Arguments:
        nin (int): Number of input word_ids; accepted for API
            compatibility, not used by this CPU implementation
        inputs (Tensor): Input tensor; word ids are read from row 0
        error (Tensor): Error tensor.
        error_t (Tensor): Transposed error tensor; accepted for API
            compatibility, not used by this CPU implementation
        dW (Tensor): Gradient tensor (delta).
        pad_idx (int): word id to skip (padding token)
        alpha (float): accepted for API compatibility, not used here
        beta (float): accepted for API compatibility, not used here
    """
    wrd_ids = inputs._tensor[0]
    # group the column indices of each distinct word id
    unqidx, inv = np.unique(wrd_ids, return_inverse=True)
    groups = [np.where(inv == i) for i in range(len(unqidx))]
    for (wrd_id, group) in zip(unqidx, groups):
        if wrd_id != pad_idx:
            dW[wrd_id, :] = self.sum(error.take(group[0], axis=1), axis=1)
    """
    alternative bprop
    for (j, wrd_id) in enumerate(wrd_ids):
        dW[:, wrd_id] = dW[:, wrd_id] + error[:, j]
    """
def _hist_tensor(self, tag):
"""
Create a tensor the right size for histogram data, with memory allocated
in the contiguous histogram buffer. Track it by tag for later reference.
"""
assert self.hist_idx < self.hist_max
self.hist_map[tag] = (self.hist_idx)
hist_buf = self.hist_buf[self.hist_idx]
self.hist_idx += 1
return hist_buf
def dump_hist_data(self):
    """
    Return the accumulated histogram buffer and its tag map, then reset
    the backend's histogram state with a fresh buffer.

    Returns:
        (Tensor, dict): the filled histogram buffer and the mapping
        from tag to row index within it.
    """
    data, mapping = self.hist_buf, self.hist_map
    # start over with an empty map and a brand-new buffer
    self.hist_map = {}
    self.hist_idx = 0
    self.hist_buf = self.empty((self.hist_max, self.hist_bins), dtype=np.int32)
    return data, mapping
def Relu(self, ary, out=None):
    """
    Apply the rectified linear transformation max(ary, 0).

    Arguments:
        ary: numpy array input
        out: optional array to write the result into

    Returns:
        the rectified array (``out`` when provided).
    """
    args = (ary, 0) if out is None else (ary, 0, out)
    return np.maximum(*args)
def binarize(self, ary, out, stochastic=True):
    """
    Binarize the input array into {-1, +1}.

    Arguments:
        ary: input tensor
        out: tensor that receives the result
        stochastic (bool): if True, sample the sign with probability
            clip((ary+1)/2, 0, 1); otherwise threshold at 0.

    Returns:
        out
    """
    if not stochastic:
        # deterministic: 1 where ary >= 0, else 0
        self.greater_equal(ary, 0, out)
    else:
        # stochastic: P(+1) = hard-sigmoid of the input
        out[:] = (ary + 1) / 2.0
        self.clip(out, 0, 1, out)
        prob = self.array(np.random.uniform(0, 1, size=ary.shape))
        self.less_equal(prob, out, out)
    # map {0, 1} onto {-1, +1}
    out[:] = 2 * out - 1
    return out
def shift(self, ary, shift_ary, value=True, out=None):
    """
    Multiply ``ary`` by a signed power-of-two version of ``shift_ary``.

    Arguments:
        ary: input tensor
        shift_ary: tensor of shift amounts
        value (bool): when True, shift_ary holds values which are first
            rounded to the nearest signed power of two; when False it
            holds the exponents directly.
        out: optional output tensor; allocated when omitted

    Returns:
        out, containing ary * sign * 2**exponent
    """
    if value:
        # round |shift_ary| to the nearest power of two, keeping sign
        exponent = self.rint(self.safelog(self.absolute(shift_ary)) / self.log(2))
        pow2 = self.multiply(self.sgn(shift_ary), self.exp2(exponent))
    else:
        # shift_ary already holds the exponents
        pow2 = self.exp2(shift_ary)
    if out is None:
        out = self.empty_like(ary) if hasattr(ary, 'shape') else self.empty((1, 1))
    out[:] = self.multiply(ary, pow2)
    return out
def init_mark(self):
    """
    Generate a fresh timing mark object.

    Returns:
        dict: a mark whose 'time' field is zeroed.
    """
    return dict(time=0)
def record_mark(self, marker):
    """
    Stamp ``marker`` with the current wall-clock time.

    Arguments:
        marker (time mark): timing mark generated by init_mark()
    """
    now = time.time()
    marker['time'] = now
def synchronize_mark(self, marker):
    """
    Synchronize on the given marker.  A no-op on the CPU backend
    (present for API parity with device backends).

    Arguments:
        marker (time mark): timing mark generated by init_mark()
    """
    pass
def get_time(self, start, end):
    """
    Elapsed time between two marks.

    Arguments:
        start (time marker): start time mark
        end (time marker): end time mark

    Returns:
        float: milliseconds elapsed between start and end.
    """
    elapsed_s = end['time'] - start['time']
    return elapsed_s * 1000.0
|
# ----------------------------------------------------------------------------
# Copyright 2014-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Our CPU based backend interface and tensor data structure. Our implementation
wraps :mod:`numpy` ndarray and related operations
"""
from __future__ import division
from builtins import object, round, str, zip
import numpy as np
import logging
import time
from neon.backends.backend import Tensor, Backend, OpTreeNode, OpCollection
from neon.backends.layer_cpu import ConvLayer, DeconvLayer, PoolLayer
from neon.util.compat import xrange
_none_slice = slice(None, None, None)
logger = logging.getLogger(__name__)
# TODO: enable this flag to find numerical problems
# np.seterr(all='raise')
class CPUTensor(Tensor):
    """
    The n-dimensional array data structure that resides in host memory,
    and is meant to be manipulated on the CPU.  Wraps a `numpy.ndarray`
    tensor.

    Arguments:
        dtype (numpy.ndtype, optional): underlying data type of the elements.
        ary (data array, optional): optionally it can be instantiated with
                                    a data array
        persist_values (bool, optional): If set to True (the default), the
                                         values assigned to this Tensor will
                                         persist across multiple begin and end
                                         calls.  Setting to False may provide
                                         a performance increase if values do
                                         not need to be maintained across such
                                         calls

    See also:
        :class:`NervanaCPU` class
    """
    _tensor = None

    def __init__(self,
                 backend,
                 shape=None,
                 dtype=np.float32,
                 ary=None,
                 name=None,
                 persist_values=True,
                 base=None):
        super(CPUTensor, self).__init__(backend, shape, dtype, name,
                                        persist_values)

        # supported dtypes
        assert dtype in (np.float16, np.float32, np.float64, np.uint8, np.int8,
                         np.uint16, np.int16, np.uint32, np.int32)

        dtype = np.dtype(dtype)

        # wrap (or convert) the provided data into an ndarray of dtype
        if type(ary) != np.ndarray:
            self._tensor = np.array(ary, dtype)
        elif ary.dtype != dtype:
            self._tensor = ary.astype(dtype)
        else:
            self._tensor = ary

        # pad trailing singleton dims so we never drop below _min_dims
        while self._tensor.ndim < self._min_dims:
            self._tensor = self._tensor.reshape(self._tensor.shape + (1, ))

        if shape is not None and len(shape) < self._min_dims:
            self.shape = shape + (1, )*(self._min_dims - len(shape))
        else:
            self.shape = self._tensor.shape

        # validate that every dim is an integer and compute total size
        shape_ = []
        size = 1
        for dim in self.shape:
            if int(dim) != dim:
                raise TypeError('shape dims must be integer values [%s]' % str(dim))
            dim = int(dim)
            shape_.append(dim)
            size *= dim
        self.shape = tuple(shape_)
        self.size = size

        self.base = base
        self.dtype = dtype
        self.is_contiguous = self._tensor.flags.c_contiguous

    def __str__(self):
        """
        Returns a string representation of this Tensor.

        Returns:
            str: the representation.
        """
        if self._tensor.base is not None:
            base_id = id(self._tensor.base)
        else:
            base_id = id(self._tensor)
        return ("CPUTensor(base 0x%x) name:%s shape:%s dtype:%s strides:%s"
                " is_c_contiguous:%s" % (base_id, self.name, self.shape,
                                         self.dtype, self._tensor.strides,
                                         self._tensor.flags.c_contiguous))

    def __repr__(self):
        """
        Returns a more unambiguous string representation of the Tensor.

        Returns:
            str: the representation.
        """
        return self.__str__()

    def __len__(self):
        """
        Return the size of the leading dimension of self.
        """
        if len(self.shape):
            return self.shape[0]
        else:
            return 0

    def __setitem__(self, key, value):
        """
        Assign the specified value to a subset of elements found via slice
        style indexing along each dimension. e.g. A[5:10, :] = 4.5.
        Each slice consists of start_idx:stop_idx:step_size triplets.  If
        step_size isn't specified it defaults to 1.  If start_idx isn't
        specified it defaults to 0.  If stop_idx isn't specified it defaults
        to the total number of elements along that dimension.  As such a slice
        value of ':' allows one to select all elements along that dimension.

        Arguments:
            key (int, slice, tuple): indices of each dimension's slice.
            value (numeric array, CPUTensor): values to be assigned to the
                                              extracted element subset.  If an
                                              array it should be the same shape
                                              as what key indexes (or be
                                              broadcastable as such).
        """
        self.__getitem__(key)._assign(value)
        return self

    def __getitem__(self, key):
        """
        Extract a subset view of the items via slice style indexing
        along each dimension. e.g. A[5:10, :].  Each slice consists of
        start_idx:stop_idx:step_size triplets.  If step_size isn't specified it
        defaults to 1.  If start_idx isn't specified it defaults to 0.  If
        stop_idx isn't specified it defaults to the total number of elements
        along that dimension.  As such a slice value of ':' allows one to
        select all elements along that dimension.  To be consistent with GPU
        Tensors, CPU Tensors remove the axis that has size 1 unless it needs to
        maintain 2D.

        Arguments:
            key (int, slice, tuple): indices of each dimension's slice.

        Returns:
            CPUTensor: view of self corresponding to the subset items.
        """
        # speed up common case of [:]
        if not isinstance(key, tuple):
            if key == _none_slice:
                return self
            key = (key,)

        # ensure we return a view
        # exact same behavior as cpu
        # let a.shape = (3,4)
        # a[1,1] = 10  # cpu, gpu and numpy
        # type(a[1,1]) # for cpu and gpu type is Tensor; for numpy type is float
        key_list = list(key)
        for idx, k in enumerate(key):
            if type(k) is int:
                # convert scalar indices into 1-wide slices so numpy keeps
                # the axis (and therefore returns a view, not a scalar)
                k = self.shape[idx] + k if k < 0 else k
                key_list[idx] = slice(k, k + 1, None)
        key = tuple(key_list)

        new_shape = list(self._tensor[key].shape)
        # squeeze size-1 axes, but never below 2 dims.  (Fixes the
        # original implementation which used the non-portable `k is 1`
        # identity test and removed items from the list while
        # enumerating it, skipping some size-1 axes.)
        while len(new_shape) > 2 and 1 in new_shape:
            new_shape.remove(1)

        # return a view of the tensor
        return self.__class__(
            backend=self.backend,
            ary=self._tensor[key].reshape(new_shape),
            dtype=self._tensor.dtype,
            base=self)

    def _assign(self, value):
        """
        Assign an input value to the CPU tensor.  The NervanaCPU does clipping
        for int and uint types, when overflow happens.

        Arguments:
            value (CPUTensor, OpTreeNode, numeric): the value to be assigned.

        Raises:
            TypeError: on unsupported value types.
        """
        if isinstance(value, (CPUTensor, OpTreeNode)):
            # defer to the op-tree machinery so compound expressions execute
            OpTreeNode.build("assign", self, value)
        elif isinstance(value, (int, float, np.ndarray)):
            self.set(value)
        else:
            raise TypeError("Invalid type for assignment: %s" % type(value))
        return self

    def set(self, value):
        """
        Wrap the value into NervanaCPU tensor.

        Arguments:
            value: Array or single input.  If it is array, check and convert
                   the dtype and shape.  If it is single value, broadcast to
                   the memory.

        Returns:
            self
        """
        if isinstance(value, np.ndarray):
            # compare dtypes by equality, not identity: `is` only happens
            # to work for interned builtin numpy dtypes
            if value.dtype != self.dtype:
                value = value.astype(self.dtype)
            assert value.size == self.size
            if value.ndim < self._min_dims:
                value = value.reshape(self.shape)

        self._tensor[:] = value
        return self

    def get(self):
        """
        Return a host copy of the underlying array.
        """
        return self._tensor.copy()

    def raw(self):
        """
        Access the raw buffer.

        Returns:
            pointer: A device specific pointer
        """
        return self._tensor.ctypes.data

    def asnumpyarray(self):
        """
        Deprecated.  Scheduled to be removed in 2.0.  Use get() instead.

        Note: unlike get(), this returns the live backing array, not a copy.
        """
        return self._tensor

    def take(self, indices, axis=None):
        """
        Select a subset of elements from an array across an axis.

        Arguments:
            indices (Tensor, numpy ndarray): indices of elements to select
            axis (int): axis across which to select the values

        Returns:
            Tensor: Tensor with selected values
        """
        if type(indices) == self.__class__:
            indices = indices._tensor

        # if indices are nx1 or 1xn, much of our code assumes these dims are
        # collapsed, hence the squeeze call.
        if type(indices) == np.ndarray:
            indices = indices.squeeze()

        new_shape = list(self.shape)
        new_shape[axis] = indices.size

        return self.__class__(
            backend=self.backend,
            ary=self._tensor.take(indices, axis).reshape(new_shape),
            dtype=self._tensor.dtype,
            base=self)

    def fill(self, value):
        """
        Assign specified value to each element of this CPUTensor.

        Arguments:
            value (numeric): The value to be assigned to each element.

        Return:
            CPUTensor: updated view of the data.
        """
        self._tensor.fill(value)
        return self

    def copy(self, a):
        """
        Construct and return a deep copy of the Tensor passed.

        Arguments:
            a (Tensor): the object to copy

        Returns:
            Tensor: updated view of the data.
        """
        return self._assign(a)

    def copy_from(self, a):
        """
        Alias of copy.

        Arguments:
            a (Tensor): the object to copy

        Returns:
            Tensor: updated view of the data.
        """
        return self._assign(a)

    def reshape(self, *shape):
        """
        Return a reshaped view.
        """
        if isinstance(shape[0], (tuple, list)):
            shape = tuple(shape[0])
        if shape == self.shape:
            return self
        return self.__class__(
            backend=self.backend,
            ary=self._tensor.reshape(shape),
            dtype=self._tensor.dtype,
            base=self)

    @property
    def T(self):
        """
        Return a transposed view.

        For 2D tensor, will do a normal transpose.
        For 3D tensor, will keep the 0 dim, swap the 1 and 2 dimensions.
        """
        if len(self.shape) <= 2:
            ary = self._tensor.transpose()
        else:
            # support for batched dot.
            # perserve outer dimension but reverse inner dims
            ary = self._tensor.swapaxes(1, 2)
        return self.__class__(
            backend=self.backend,
            ary=ary,
            dtype=self._tensor.dtype,
            base=self)

    def transpose(self, out=None):
        """
        Return a transposed view of the data.  Alias of .T property.
        """
        if out:
            return OpTreeNode.build("assign", out, self.T)
        return self.T

    def share(self, shape, dtype=None, name=None):
        """
        Return a view: ary, where ary.size <= self.size.
        Allows easy sharing of temporary memory.
        This is mostly provided for compatibility -- dtype is ignored.

        Raises:
            ValueError: when the requested view is larger than self.
        """
        size = np.prod(shape)
        if size > self.size:
            raise ValueError("total size of new array must <= size of parent")
        ary = self._tensor.ravel()[:size].reshape(shape)
        return self.__class__(
            backend=self.backend,
            ary=ary,
            dtype=self._tensor.dtype,
            base=self)

    def hist(self, tag):
        """
        Compute a histogram of the current tensor values.

        Arguments:
            tag (string): Tag to identify the current state of the tensor,
                          useful for disambiguating multiple histograms of the
                          same tensor at different points in time.

        Returns:
            Tensor containing the histogram data.
        """
        nbins = self.backend.hist_bins
        offset = self.backend.hist_offset
        # bucket values by rounded log2 magnitude; first bin catches
        # everything below the offset (including zeros -> -inf)
        bins = np.arange(nbins + 1) + float(offset)
        bins[0] = -float('Inf')
        np_inp_log_abs = np.rint(
            np.log2(np.abs(self._tensor.astype(np.float32))))
        np_hist, edges = np.histogram(np_inp_log_abs, density=False, bins=bins)
        nc_hist = self.backend._hist_tensor(tag)._assign(np_hist)
        return nc_hist
class CustomNumpy(object):
    """Numpy arg-reduction helpers that keep the reduced axis (as size 1)."""

    @staticmethod
    def _keepdims_shape(x, axis):
        # target shape with the reduced axis collapsed to length 1
        shape = list(x.shape)
        shape[axis] = 1
        return tuple(shape)

    @staticmethod
    def argmax(x, axis=1, keepdims=True):
        """
        Calls numpy argmax, reshaping the result so the reduced axis
        remains as a size-1 dimension.
        """
        return np.argmax(x, axis=axis).reshape(CustomNumpy._keepdims_shape(x, axis))

    @staticmethod
    def argmin(x, axis=1, keepdims=True):
        """
        Calls numpy argmin, reshaping the result so the reduced axis
        remains as a size-1 dimension.
        """
        return np.argmin(x, axis=axis).reshape(CustomNumpy._keepdims_shape(x, axis))
def _assign_right_to_left(left, right):
    # elementwise in-place copy of `right` into the buffer backing `left`
    left[:] = right


# Dispatch table used by NervanaCPU.execute: maps optree op names to the
# numpy callables that implement them on raw ndarrays.
numpy_call_dict = {
    # assign
    "assign": _assign_right_to_left,
    # zero_operand ops
    # unary ops
    "neg": lambda left: -left,
    "abs": lambda left: np.abs(left),
    "sgn": lambda left: np.sign(left),
    "sqrt": lambda left: np.sqrt(left),
    "sqr": lambda left: np.square(left),
    "exp": lambda left: np.exp(left),
    "log": lambda left: np.log(left),
    # clamps the argument at exp(-50) so the log never yields -inf/nan
    "safelog": lambda left: np.log(np.maximum(left, np.exp(-50.))),
    "exp2": lambda left: np.exp2(left),
    "log2": lambda left: np.log2(left),
    "sig": lambda left: 1. / (1. + np.exp(-left)),
    "sig2": lambda left: 1. / (1. + np.exp2(-left)),
    "tanh": lambda left: np.tanh(left),
    # base-2 analogue of tanh: (2^(2x) - 1) / (2^(2x) + 1)
    "tanh2": lambda left: (np.exp2(2. * left) - 1.) / (np.exp2(2. * left) + 1.),
    "transpose": lambda left: np.transpose(left),
    "rint": lambda left: np.rint(left),
    # binary ops
    "add": lambda left, right: left + right,
    "sub": lambda left, right: left - right,
    "mul": lambda left, right: left * right,
    "div": lambda left, right: left / right,
    "eq": lambda left, right: left == right,
    "ne": lambda left, right: left != right,
    "lt": lambda left, right: left < right,
    "le": lambda left, right: left <= right,
    "gt": lambda left, right: left > right,
    "ge": lambda left, right: left >= right,
    "pow": lambda left, right: np.power(left, right),
    "minimum": lambda left, right: np.minimum(left, right),
    "maximum": lambda left, right: np.maximum(left, right),
    "dot": lambda left, right: np.dot(left, right),
    # reduction ops: also receive the op dict, which carries the axis
    "sum": lambda op_dict, left: np.sum(left, axis=op_dict['axis'], keepdims=True),
    "max": lambda op_dict, left: np.max(left, axis=op_dict['axis'], keepdims=True),
    "min": lambda op_dict, left: np.min(left, axis=op_dict['axis'], keepdims=True),
    "argmax": lambda op_dict, left: CustomNumpy.argmax(left, axis=op_dict['axis'], keepdims=True),
    "argmin": lambda op_dict, left: CustomNumpy.argmin(left, axis=op_dict['axis'], keepdims=True),
}
class NervanaCPU(Backend):
    """
    Sets up a :mod:`numpy` based backend for matrix ops. By default, we use
    32-bit element data types for any arrays constructed.

    Attributes:
        default_dtype (dtype): default element data type.
        tensor_cls: underlying Tensor type. For CPU backend, it will be CPU tensor

    See also:
        :class:`CPUTensor`
    """
    # registry name under which this backend is selected
    backend_name = 'cpu'
def __init__(self,
             rng_seed=None,
             default_dtype=np.float32,
             hist_bins=64,
             hist_offset=-48,
             compat_mode=None,
             # Ignored
             num_devices=None,
             stochastic_round=None,
             device_id=None,
             deterministic=None
             ):
    """
    Arguments:
        rng_seed (int, optional): seed for the host random number generator
        default_dtype (dtype): element dtype used when none is given; must
                               be float16, float32 or float64
        hist_bins (int): number of bins for tensor histograms
        hist_offset (int): offset of the first histogram bin
        compat_mode (str, optional): compatibility mode passed to Backend
        num_devices, stochastic_round, device_id, deterministic:
            accepted for signature compatibility with the GPU backend;
            ignored here.

    Raises:
        ValueError: if default_dtype is not a supported float type.
    """
    if default_dtype not in [np.float16, np.float32, np.float64]:
        # fix: message previously named "nervanagpu" in the CPU backend,
        # and the ValueError carried no message at all
        logger.error('Default data type for nervanacpu '
                     'backend must be float16, 32 or 64')
        raise ValueError('Default data type for nervanacpu backend '
                         'must be float16, 32 or 64')
    super(NervanaCPU, self).__init__(rng_seed, default_dtype, compat_mode=compat_mode)

    # ensure an optimized BLAS is present and warn if not
    # (logger.warning: .warn is a deprecated alias)
    try:
        if not any(x in str(np.__config__.blas_opt_info['libraries']).lower()
                   for x in ['openblas', 'atlas', 'mkl', 'accelerate']):
            logger.warning("No accelerated BLAS libraries found, CPU "
                           "performance may suffer. Consider installing "
                           "one of openblas, Atlas, MKL, or vecLib")
    except (AttributeError, KeyError):
        logger.warning("Problems inferring BLAS info, CPU performance may "
                       "be suboptimal")

    self.device_type = 0
    self.device_id = 0
    self.tensor_cls = CPUTensor
    logger.info("Initialized NervanaCPU")

    # None forces set_hist_buffers to allocate on this first call
    self.hist_bins, self.hist_offset = None, None
    self.set_hist_buffers(hist_bins, hist_offset)

    self.use_pinned_mem = False
def consume(self, buf_index, hostlist, devlist):
    """
    Copy transposed host data into one slot of a double buffer,
    allocating the device-side tensor lazily on first use.

    Arguments:
        buf_index (int): which of the two buffer slots to fill (0 or 1)
        hostlist (list): host-side arrays (indexable, providing .T and .dtype)
        devlist (list): destination tensors; a None entry is allocated here
    """
    assert 0 <= buf_index < 2, 'Can only double buffer'
    if devlist[buf_index] is None:
        # lazily allocate the destination with the transposed shape
        devlist[buf_index] = self.empty_like(
            hostlist[buf_index].T, dtype=hostlist[buf_index].dtype
        )
    devlist[buf_index][:] = hostlist[buf_index].T
def set_hist_buffers(self, hist_bins, hist_offset):
    """
    (Re)allocate the histogram collection buffers when the bin count or
    offset changes; no-op when both are unchanged.

    Arguments:
        hist_bins (int): number of histogram bins
        hist_offset (int): offset of the first bin
    """
    if (hist_bins, hist_offset) == (self.hist_bins, self.hist_offset):
        return
    self.hist_bins = hist_bins
    self.hist_offset = hist_offset
    # fixed capacity: number of histograms that can be collected
    self.hist_max = 4096
    self.hist_buf = self.empty((self.hist_max, hist_bins), dtype=np.int32)
    self.hist_idx = 0
    self.hist_map = dict()
def gen_rng(self, seed=None):
    """
    Generate the random number generator on host.

    Arguments:
        seed (int): random number generator seed

    Returns:
        seeded numpy RNG
    """
    self.rng = np.random.RandomState(seed)
    # remember the freshly-seeded state so rng_reset can restore it later
    self.init_rng_state = self.rng_get_state()
    return self.rng
def rng_set_state(self, state):
    """
    Set the RNG state for host RNG.

    Arguments:
        state (np.array): numpy random number state vector, as produced
                          by rng_get_state / RandomState.get_state
    """
    self.rng.set_state(state)
def rng_get_state(self):
    """
    Return the current state of the on-host RNG.

    Returns:
        np.array: the on-host RNG state vectors (RandomState.get_state
                  tuple)
    """
    return self.rng.get_state()
def rng_reset(self):
    """
    Reset the random state to the state where the Backend is first
    initialized (the state captured by gen_rng).
    """
    self.rng_set_state(self.init_rng_state)
def fill_normal(self, ary, mean=0, stdv=1):
    """
    Fill ary with normally distributed random numbers.

    Arguments:
        ary (Tensor): Tensor to fill with random values
        mean (float): Mean value. Default 0
        stdv (float): standard deviation value. Default 1
    """
    # fix: draw from the backend's seeded RNG (self.rng) instead of the
    # global numpy RNG, so results honor gen_rng / rng_set_state /
    # rng_reset and are reproducible under a fixed seed
    ary[:] = self.rng.standard_normal(ary.shape) * stdv + mean
def execute(self, optree):
    """
    Execute the optree. Break optree into sub-optrees if necessary.

    Arguments:
        optree: (OpTreeNode): the OpTreeNode object that represents all
                              the operations

    Returns:
        the output ndarray (onehot case) or the first element of the
        post-order traversal stack (for an assign tree this appears to be
        the assignment destination -- see note at the bottom).
    """
    # deal with onehot specially for now
    if (len(optree) == 3 and isinstance(optree[2], OpTreeNode) and
            optree[2][0]['op'] == 'onehot'):
        assert optree[0]['op'] == 'assign'
        assert isinstance(optree[1], Tensor)
        # get the output buffer
        array_output = optree[1]._tensor
        # get the output shape and onehot representation length will be on
        # this axis
        numpy_axis = optree[2][0]['axis']
        numpy_ind0 = optree[2][0]['idx']._tensor.squeeze()
        numpy_ind_len = numpy_ind0.size
        numpy_ind1 = list(range(numpy_ind_len))
        # ind for indexing
        numpy_ind = np.zeros((2, numpy_ind_len), dtype=np.int32)
        numpy_ind[numpy_axis] = numpy_ind0
        numpy_ind[1 - numpy_axis] = numpy_ind1
        # zero everything, then scatter 1s at (class index, column) pairs
        array_output[:] = 0
        array_output[numpy_ind.tolist()] = 1
        return array_output

    # get post order stack
    postfix_stack = optree.traverse(list())

    # init compute stack
    compute_stack = []

    # iterate through postfix stack to compute result
    # (classic postfix evaluation: operands are pushed, operators pop
    # their arguments and push the numpy result)
    for p in postfix_stack:
        if isinstance(p, dict):
            # TODO add rand and onehot here
            if p['op'] in OpCollection.unary_ops:
                left = compute_stack.pop()
                compute_stack.append(numpy_call_dict[p['op']](left))
            elif p['op'] in OpCollection.binary_ops:
                # pop order matters: the right operand was pushed last
                right = compute_stack.pop()
                left = compute_stack.pop()
                compute_stack.append(numpy_call_dict[p['op']](left, right))
            elif p['op'] in OpCollection.reduction_ops:
                left = compute_stack.pop()
                # reduction lambdas also take the op dict (for the axis)
                compute_stack.append(numpy_call_dict[p['op']](p, left))
            elif p['op'] in OpCollection.zero_operand_ops:
                compute_stack.append(numpy_call_dict[p['op']](None))
            else:
                raise NotImplementedError
        elif isinstance(p, CPUTensor):
            # operate on the raw numpy buffer, not the Tensor wrapper
            compute_stack.append(p._tensor)
        else:
            # scalars and other constants pass through unchanged
            compute_stack.append(p)

    # the "assign" op writes into its destination in place, so the stack
    # reduces to a single entry; the first traversal element is returned
    # (presumably the assignment destination for assign trees -- the
    # computed value itself is discarded here)
    assert len(compute_stack) == 1
    return postfix_stack[0]
def empty(self, shape, dtype=None, name=None, persist_values=True,
          parallel=False, distributed=False):
    """
    Allocate a new CPUTensor of the given shape. No particular initial
    values are promised (the backing store happens to be zero-filled).

    Arguments:
        shape (int, list): The size of each dimension of the Tensor.
        dtype (dtype, optional): element data type; defaults to
                                 self.default_dtype.
        name (str, optional): tensor name.
        persist_values (bool, optional): if True (default), values persist
                                         across begin/end calls; False may
                                         perform better when they need not.
        parallel, distributed: accepted for API compatibility; unused here.

    Returns:
        CPUTensor: newly created data structure reference
    """
    if dtype is None:
        dtype = self.default_dtype
    # backed by np.zeros rather than np.empty, so contents are in fact
    # zeroed even though "empty" makes no such promise
    return self.tensor_cls(backend=self,
                           ary=np.zeros(shape, dtype),
                           dtype=dtype,
                           name=name,
                           persist_values=persist_values)
def array(self, ary, dtype=None, name=None, persist_values=True,
          parallel=False, distributed=False):
    """
    Create a CPUTensor initialized from an existing array-like object.

    Arguments:
        ary (numpy.ndarray): source element values; python built-ins like
                             ints and lists are also supported.
        dtype (dtype, optional): element data type; defaults to
                                 self.default_dtype ('float32' unless
                                 overridden).
        name (str, optional): tensor name.
        persist_values (bool, optional): if True (default), values persist
                                         across begin/end calls; False may
                                         perform better when they need not.
        parallel, distributed: accepted for API compatibility; unused here.

    Returns:
        CPUTensor: newly created data structure reference
    """
    if dtype is None:
        dtype = self.default_dtype
    return self.tensor_cls(backend=self,
                           ary=np.array(ary, dtype),
                           dtype=dtype,
                           name=name,
                           persist_values=persist_values)
def zeros(self, shape, dtype=None, name=None, persist_values=True,
          parallel=False, distributed=False):
    """
    Create a CPUTensor of the given shape with every element set to 0.

    Arguments:
        shape (list of ints): The size of each dimension of the Tensor.
        dtype (dtype, optional): element data type; defaults to
                                 self.default_dtype ('float32' unless
                                 overridden).
        name (str, optional): tensor name.
        persist_values (bool, optional): if True (default), values persist
                                         across begin/end calls; False may
                                         perform better when they need not.
        parallel, distributed: accepted for API compatibility; unused here.

    Returns:
        CPUTensor: newly created data structure reference
    """
    if dtype is None:
        dtype = self.default_dtype
    return self.tensor_cls(backend=self,
                           ary=np.zeros(shape, dtype),
                           dtype=dtype,
                           name=name,
                           persist_values=persist_values)
def ones(self, shape, dtype=None, name=None, persist_values=True,
         parallel=False, distributed=False):
    """
    Create a CPUTensor of the given shape with every element set to 1.

    Arguments:
        shape (list of ints): The size of each dimension of the Tensor.
        dtype (dtype, optional): element data type; defaults to
                                 self.default_dtype ('float32' unless
                                 overridden).
        name (str, optional): tensor name.
        persist_values (bool, optional): if True (default), values persist
                                         across begin/end calls; False may
                                         perform better when they need not.
        parallel, distributed: accepted for API compatibility; unused here.

    Returns:
        CPUTensor: newly created data structure reference
    """
    if dtype is None:
        dtype = self.default_dtype
    return self.tensor_cls(backend=self,
                           ary=np.ones(shape, dtype),
                           dtype=dtype,
                           name=name,
                           persist_values=persist_values)
def empty_like(self, ary, dtype=None, name=None, persist_values=True):
    """
    Create a CPUTensor with the same shape as ``ary``. Like empty(), the
    backing store is zero-filled even though no values are promised.

    Arguments:
        ary (tensor object): Tensor to inherit the dimensions of.
        dtype (data-type, optional): element data type; defaults to
                                     self.default_dtype.
        name (str, optional): tensor name.
        persist_values (bool, optional): if True (default), values persist
                                         across begin/end calls; False may
                                         perform better when they need not.

    Returns:
        Tensor: array object
    """
    if dtype is None:
        dtype = self.default_dtype
    return self.tensor_cls(backend=self,
                           ary=np.zeros(ary.shape, dtype),
                           dtype=dtype,
                           name=name,
                           persist_values=persist_values)
def zeros_like(self, ary, dtype=None, name=None, persist_values=True):
    """
    Create a CPUTensor with the same shape as ``ary``, with every element
    set to 0.

    Arguments:
        ary (tensor object): Tensor to inherit the dimensions of.
        dtype (data-type, optional): element data type; defaults to
                                     self.default_dtype.
        name (str, optional): tensor name.
        persist_values (bool, optional): if True (default), values persist
                                         across begin/end calls; False may
                                         perform better when they need not.

    Returns:
        Tensor: array object
    """
    if dtype is None:
        dtype = self.default_dtype
    return self.tensor_cls(backend=self,
                           ary=np.zeros(ary.shape, dtype),
                           dtype=dtype,
                           name=name,
                           persist_values=persist_values)
def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False, bsum=None):
    """
    Doing following operations (* is dot product)
    C = alpha * A * B + beta * C
    C = alpha * A.T * B + beta * C
    C = alpha * A * B.T + beta * C.

    relu: if true applied before output (and prior to beta addition)

    The operation will be short-circuited to: out <- alpha * left * right
    if beta has value 0 (the default).

    Arguments:
        A, B (CPUTensor): input operands
        C (CPUTensor): output
        alpha (float): scale A*B term
        beta (float): scale C term before sum
        relu (bool): whether to apply ReLu before output
        bsum (CPUTensor, optional): if given, receives the row-wise sum
                                    of the result

    Returns:
        CPUTensor: C
    """
    # checking type and shape
    assert A.dtype == B.dtype == C.dtype
    assert A.shape[0] == C.shape[0]
    assert B.shape[1] == C.shape[1]
    assert A.shape[1] == B.shape[0]

    if beta == 0:
        if C._tensor.flags['C_CONTIGUOUS'] is not True:
            # np.dot cannot write into a non-contiguous output buffer,
            # so compute into a temporary and slice-assign the result
            # (slice assignment already copies; the former extra .copy()
            # was redundant)
            tmp = np.empty(C.shape, dtype=C.dtype)
            np.dot(A._tensor, B._tensor, tmp)
            C._tensor[:] = tmp
        else:
            np.dot(A._tensor, B._tensor, C._tensor)
        if alpha != 1.0:
            # fix: alpha was previously dropped on this fast path,
            # contradicting the documented C = alpha * A * B
            np.multiply(C._tensor, alpha, C._tensor)
        if relu:
            self.Relu(C._tensor, C._tensor)
    else:
        # C = beta * C + alpha * relu(A.B)
        np.multiply(C._tensor, beta, C._tensor)
        tmp = np.empty(C.shape, dtype=C.dtype)
        np.dot(A._tensor, B._tensor, tmp)
        np.multiply(tmp, alpha, tmp)
        if relu:
            self.Relu(tmp, tmp)
        np.add(C._tensor, tmp, C._tensor)
    if bsum is not None:
        bsum[:] = self.sum(C, 1)

    return C
def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
    """
    Doing following operations:
    1 For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C)
    2 For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C)
    3 For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C).

    Arguments:
        A, B (CPUTensor): input operands
        C (CPUTensor): output
        alpha, beta, relu: see usage in dot()
    """
    assert A.dtype == B.dtype == C.dtype

    # dima/dimb/dimc flag whether each operand carries a leading batch dim
    dima, dimb, dimc = 0, 0, 0
    # ldaz, ldbz, ldcz = 0, 0, 0 # commented for stylecheck
    batch_grid, batch_loops = 1, 1

    if len(A.shape) == 3:
        dima = 1
    if len(B.shape) == 3:
        dimb = 1

    assert dima or dimb, "Tensor A or B must have 3 dims to use batched_dot"

    if len(C.shape) == 3:
        dimc = 1
        batch_grid = C.shape[0]
        assert not dima or A.shape[0] == batch_grid
        assert not dimb or B.shape[0] == batch_grid

    if dima:
        # case 3 (update): accumulate over A's batch dim
        batch_loops = A.shape[0]
        assert not dimb or B.shape[0] == batch_loops
    elif dimb:
        # cases 1/2 (fprop/bprop): one dot per batch entry of B
        batch_loops = B.shape[0]
        assert not dima or A.shape[0] == batch_loops

    # GEMM shape compatibility, skipping any leading batch dim
    assert A.shape[0 + dima] == C.shape[0 + dimc]
    assert B.shape[1 + dimb] == C.shape[1 + dimc]
    assert A.shape[1 + dima] == B.shape[0 + dimb]

    # note: tmp is float64 (np.zeros default); the final np.add casts
    # back to C's dtype
    tmp = np.zeros(C.shape)
    for i in range(batch_loops):
        if dima:
            # accumulate partial products across the batch (update case)
            tmp += np.dot(A._tensor[i], B._tensor[i])
        else:
            # independent dot per batch entry (fprop/bprop cases)
            tmp[i] = np.dot(A._tensor, B._tensor[i])

    np.multiply(tmp, alpha, tmp)

    if relu:
        self.Relu(tmp, tmp)

    np.add(C._tensor * beta, tmp, C._tensor)
    return C
def xnor_compound_dot(self, A, B, C, beta=0.0, bsum=None):
    """
    Performs XNOR GEMM
    C = A * B

    Arguments:
        A (Tensor): left-hand side operand.
        B (Tensor): right-hand side operand.
        C (Tensor): output operand
        beta (float): accepted for signature compatibility; unused here
        bsum (Tensor, optional): if given, receives the row-wise sum of C

    Returns:
        Tensor: C
    """
    # validate dtypes agree and shapes are GEMM-compatible
    assert A.dtype == B.dtype == C.dtype
    assert (A.shape[0], B.shape[1]) == (C.shape[0], C.shape[1])
    assert A.shape[1] == B.shape[0]

    np.dot(A._tensor, B._tensor, C._tensor)

    if bsum is not None:
        bsum[:] = self.sum(C, 1)

    return C
def copy_transpose(self, a, out, axes=None, repeat=1):
    """
    Function to perform a fast copy transpose/dimshuffle operation.
    Works just like numpy.transpose, but requires an output tensor argument.

    Arguments:
        a (Tensor): input tensor
        out (Tensor): output tensor, overwritten with the transposed data
        axes (tuple, optional): permutation of axes, as in np.transpose
        repeat (int): unused here; presumably kept for signature
                      compatibility with other backends -- TODO confirm
    """
    # np.transpose returns a view; the explicit .copy() materializes it
    # first, which also keeps this safe if `out` aliases `a`'s buffer
    out._tensor[:] = np.transpose(a._tensor, axes).copy()
def make_binary_mask(self, out, keepthresh=0.5):
    """
    Create a binary mask for dropout layers.

    Arguments:
        out (CPUTensor): Output tensor
        keepthresh (float): fraction of ones
    """
    # draw uniforms from the backend RNG and threshold: roughly
    # `keepthresh` of the entries come out as 1, the rest as 0
    keep = self.rng.uniform(size=out._tensor.shape) < keepthresh
    out._tensor[:] = np.array(keep, dtype=out._tensor.dtype)
def conv_layer(self, dtype,
               N, C, K,
               D=1, H=1, W=1,
               T=1, R=1, S=1,
               pad_d=0, pad_h=0, pad_w=0,
               str_d=1, str_h=1, str_w=1):
    """
    Create a new ConvLayer parameter object.
    This then is passed as an argument to all the convolution operations.

    N: Number of images in mini-batch
    C: Number of input feature maps
    K: Number of output feature maps

    D: Depth  of input image
    H: Height of input image
    W: Width  of input image

    T: Depth  of filter kernel
    R: Height of filter kernel
    S: Width  of filter kernel

    padding: amount of zero-padding around the given edge
    strides: factor to step the filters by in a given direction

    dtype: need to know dtype to setup proper kernels and params.

    bsum: calculate the sum along the batchnorm axis for fprop or bprop
          outputs an fp32 tensor of size Kx1

    Returns:
        ConvLayer: the parameter object describing this convolution
    """
    # all geometry handling lives in the ConvLayer object
    return ConvLayer(self, dtype, N, C, K, D, H, W, T, R, S,
                     pad_d, pad_h, pad_w, str_d, str_h, str_w)
def fprop_conv(self, layer, I, F, O,
               X=None, bias=None, bsum=None,
               alpha=1.0, beta=0.0,
               relu=False, brelu=False, slope=0.0):
    """
    Forward propagate the inputs of a convolutional network layer to
    produce output.

    Arguments:
        layer: the conv layer as a parameter object
        I (CPUTensor): inputs
        F (CPUTensor): the weights (filters)
        O (CPUTensor): outputs

        Compounding Options:
        X: tensor to use in bprop_relu or beta
            can be same as O for beta accumulate (this is default when None)
            should be same shape as O
        bias: (K,1) tensor to use for adding bias to output
            O += bias
        bsum: (K,1) tensor to accumulate batch sum over (used in batchnorm or bprop_bias)
            bsum = sum(O.reshape(K,-1), axis=1)
            the sum operation is fully deterministic
        alpha, beta:
            O = alpha*O + beta*X
            O = alpha*O + beta*O  (if X==O)
        relu, slope: boolean flag to apply:
            O = max(O, 0) + beta*min(O, 0)
            can be combined with bias (where bias is added first)
        brelu, slope: boolean flag to apply:
            O *= (X > 0) + beta*(X < 0)
            can be combined with bsum tensor to output bprop_bias
    """
    # delegate to the shared xprop implementation on the layer object
    # (the same routine also serves bprop_conv with backward=True)
    layer.xprop_conv(I, F, O, X, bias, bsum, alpha, beta, relu, brelu, slope)
def bprop_conv(self, layer, F, E, grad_I,
               X=None, bias=None, bsum=None,
               alpha=1.0, beta=0.0,
               relu=False, brelu=False, slope=0.0):
    """
    Backward propagate the error through a convolutional network layer.

    Arguments:
        layer: the conv layer as a parameter object
        F (CPUTensor): the weights (filters)
        E (CPUTensor): errors
        grad_I (CPUTensor): gradient to inputs (output delta)

        Compounding Options:
        X: tensor to use in bprop_relu or beta
            can be same as grad_I for beta accumulate (this is default when None)
            should be same shape as grad_I
        bias: (K,1) tensor to use for adding bias to output
            grad_I += bias
        bsum: (K,1) tensor to accumulate batch sum over (used in batchnorm or bprop_bias)
            bsum = sum(grad_I.reshape(K,-1), axis=1)
            the sum operation is fully deterministic
        alpha, beta:
            grad_I = alpha*grad_I + beta*X
            grad_I = alpha*grad_I + beta*grad_I  (if X==grad_I)
        relu, slope: boolean flag to apply:
            grad_I = max(grad_I, 0) + slope*min(grad_I, 0)
            can be combined with bias (where bias is added first)
        brelu, slope: boolean flag to apply:
            grad_I *= (X > 0) + slope*(X < 0)
            can be combined with bsum tensor to output bprop_bias
    """
    # same shared xprop routine as fprop_conv, run in backward mode
    layer.xprop_conv(E, F, grad_I, X, bias, bsum, alpha, beta, relu, brelu, slope,
                     backward=True)
def update_conv(self, layer, I, E, U, alpha=1.0, beta=0.0):
    """
    Compute the updated gradient for a convolutional network layer.

    Arguments:
        layer: the conv layer as a parameter object
        I (CPUTensor): the inputs
        E (CPUTensor): the errors
        U (CPUTensor): the updates
        alpha (float): linear scaling
        beta (float): scaled accumulation
    """
    # sanity-check that each tensor matches the layer's configured
    # geometry before delegating
    assert layer.sizeI == I.size
    assert layer.sizeO == E.size
    assert layer.sizeF == U.size

    layer.update_conv(I, E, U, alpha, beta)
def deconv_layer(self, dtype,
                 N, C, K,
                 P, Q,
                 R=1, S=1,
                 pad_d=0, pad_h=0, pad_w=0,
                 str_d=1, str_h=1, str_w=1):
    """
    Create a new DeconvLayer parameter object.
    This then is passed as an argument to all the convolution operations.

    N: Number of images in mini-batch
    C: Number of output feature maps
    K: Number of input feature maps

    P: Height of input
    Q: Width of input

    D: Depth  of output image
    H: Height of output image
    W: Width  of output image

    T: Depth  of filter kernel
    R: Height of filter kernel
    S: Width  of filter kernel

    padding: amount of zero-padding around the given edge
    strides: factor to step the filters by in a given direction

    dtype: need to know dtype to setup proper kernels and params.

    Returns:
        DeconvLayer: the parameter object describing this deconvolution
    """
    # all geometry handling lives in the DeconvLayer object
    return DeconvLayer(self, dtype, N, C, K, P, Q, R, S,
                       pad_d, pad_h, pad_w, str_d, str_h, str_w)
def lrn_layer(self, dtype, N, C, D=1, H=1, W=1, J=1):
    """
    Create a new PoolLayer parameter object configured for local
    response normalization. This then is passed to the LRN kernels.

    N: Number of images in mini-batch
    C: Number of input feature maps
    H: Height of input image
    W: Width of input image

    J: Size of feature map pooling window (maxout n_pieces)

    Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
    """
    assert J % 2 == 1, "Only support odd LRN window size"
    # pad the channel axis by half the window so it stays centered; every
    # other pooling dimension keeps its neutral default since LRN only
    # operates along the feature-map (k) axis
    return PoolLayer(self, dtype, 'lrn', N, C, D, H, W, J,
                     T=1, R=1, S=1,
                     pad_c=J // 2, pad_d=0, pad_h=0, pad_w=0,
                     str_c=1, str_d=1, str_h=1, str_w=1)
def fprop_lrn(self, layer, I, O, denom, alpha=None, beta=None, ascale=1, bpower=1):
    """
    Forward propagate pooling layer.

    Arguments:
        layer (PoolLayer): The pool layer object, different backends have
                           different pool layers.
        I (Tensor): Input tensor.
        O (Tensor): output tensor.
        denom (Tensor): denominator tensor, stores the result of the squared pooling/contrast
        ascale (float): scaling parameter (alpha) to multiply the pooled sum (1.25e-5 in AK)
        bpower (float): exponential parameter (beta) to raise denominator by (0.75 in AK)
    """
    assert layer.sizeI == I.size
    assert layer.sizeO == O.size

    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_I = I._tensor.reshape(layer.dimI)
    array_O = O._tensor.reshape(layer.dimO)  # _tensor to write to
    # although we can calculate directly into O, keeping denom around is useful for bprop
    array_d = denom._tensor.reshape(layer.dimO)  # _tensor to write to

    for k in range(K):
        sliceC, _ = layer.kSlice[k]
        # NOTE(review): loop-invariant -- could be hoisted above the k loop
        _ascale = ascale / J

        for m in range(M):
            sliceD, _ = layer.mSlice[m]
            for p in range(P):
                sliceH, _ = layer.pSlice[p]
                for q in range(Q):
                    sliceW, _ = layer.qSlice[q]
                    # window of the input covered by output cell (k,m,p,q)
                    sliceI = array_I[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    # denom = 1 + (ascale/J) * sum(x^2) over the window
                    array_d[k, m, p, q, :] = 1 + _ascale * np.sum(np.square(sliceI), axis=0)

    array_O[:] = array_I * np.power(array_d, -bpower)  # elementwise divide by denominator
def bprop_lrn(self, layer, I, O, E, delta, denom, alpha=None, beta=None, ascale=1, bpower=1):
    """
    Backward propagate pooling layer.

    Arguments:
        layer (PoolLayer): The pool layer object. Different backends have
                           different pool layers.
        I (Tensor): Input tensor.
        O (Tensor): Output tensor produced by fprop_lrn.
        E (Tensor): Error tensor.
        delta (Tensor): Gradient tensor (delta)
        denom (Tensor): denominator tensor computed during bprop
        ascale (float): scaling parameter (alpha) to multiply the pooled sum (1.25e-5 in AK)
        bpower (float): exponential parameter (beta) to raise denominator by (0.75 in AK)
    """
    assert layer.sizeI == I.size
    assert layer.sizeO == E.size
    assert layer.sizeI == delta.size

    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_I = I._tensor.reshape(layer.dimI)
    array_E = E._tensor.reshape(layer.dimO)
    array_O = O._tensor.reshape(layer.dimO)
    array_delta = delta._tensor.reshape(layer.dimI)  # write to
    array_denom = denom._tensor.reshape(layer.dimO)

    for k in range(K):
        sliceC, _ = layer.kSlice[k]

        for m in range(M):
            sliceD, _ = layer.mSlice[m]
            for p in range(P):
                sliceH, _ = layer.pSlice[p]
                for q in range(Q):
                    sliceW, _ = layer.qSlice[q]
                    # window views over output, error and denominator
                    _O = array_O[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    _E = array_E[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    _den = array_denom[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    # temporarily store part of the derivative in here
                    array_delta[k, m, p, q, :] = np.sum(_O * _E / _den, axis=0)

    # combine the windowed partial sums with the direct (unpooled) term
    array_delta[:] = -2 * bpower * (ascale / float(J)) * array_delta * array_I + (
        array_E * np.power(array_denom, -bpower))
def pool_layer(self, dtype,
               op, N, C,
               D=1, H=1, W=1,
               J=1, T=1, R=1, S=1,
               pad_c=0, pad_d=0, pad_h=0, pad_w=0,
               str_c=None, str_d=None, str_h=None, str_w=None):
    """
    Create a new PoolLayer parameter object.
    This then is passed as an argument to all pooling kernels.

    op: "max", "avg", "l2" pooling (currently bprop only supports max, but not avg and l2)
    N: Number of images in mini-batch

    C: Number of input feature maps
    D: Depth  of input image
    H: Height of input image
    W: Width  of input image

    J: Size of feature map pooling window (maxout n_pieces)
    T: Depth  of pooling window
    R: Height of pooling window
    S: Width  of pooling window

    padding: amount of zero-padding around the given image or feature map edge
    strides: factor to step the window by in a given direction (overlap allowed)

    Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
    """
    # strides default to the window size, i.e. non-overlapping pooling
    str_c = J if str_c is None else str_c
    str_d = T if str_d is None else str_d
    str_h = R if str_h is None else str_h
    str_w = S if str_w is None else str_w

    return PoolLayer(self, dtype, op, N, C, D, H, W, J, T, R, S,
                     pad_c, pad_d, pad_h, pad_w, str_c, str_d, str_h, str_w)
def fprop_pool(self, layer, I, O, argmax=None, beta=0.0):
    """
    Forward propagate pooling layer.

    Arguments:
        layer (PoolLayer): The pool layer object, different backends have
                           different pool layers.
        I (Tensor): Input tensor.
        O (Tensor): output tensor.
        argmax (Tensor): tensor to store location of the maximum
                         (required when layer.op == "max")
        beta (float): scale applied to the existing contents of O before
                      adding the pooled result (0 overwrites)
    """
    assert layer.sizeI == I.size
    assert layer.sizeO == O.size
    if layer.op == "max":
        assert layer.sizeO == argmax.size

    op = layer.op
    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_I = I._tensor.reshape(layer.dimI)
    array_O = O._tensor.reshape(layer.dimO)
    if op == "max":
        array_argmax = argmax._tensor.reshape(layer.dimO)

    for k in range(K):
        sliceC, _ = layer.kSlice[k]
        for m in range(M):
            sliceD, _ = layer.mSlice[m]
            for p in range(P):
                sliceH, _ = layer.pSlice[p]
                for q in range(Q):
                    sliceW, _ = layer.qSlice[q]
                    # flatten the pooling window to (window_size, N)
                    sliceI = array_I[sliceC, sliceD, sliceH, sliceW, :].reshape(-1, N)
                    if op == "max":
                        # remember where the max came from (for bprop)
                        array_argmax[k, m, p, q, :] = np.argmax(sliceI, axis=0)
                        array_O[k, m, p, q, :] = array_O[k, m, p, q, :] * beta + \
                            np.max(sliceI, axis=0)
                    elif op == "avg":
                        array_O[k, m, p, q, :] = array_O[k, m, p, q, :] * beta + \
                            np.mean(sliceI, axis=0)
                    elif op == "l2":
                        array_O[k, m, p, q, :] = array_O[k, m, p, q, :] * beta + \
                            np.sqrt(np.sum(np.square(sliceI), axis=0))
def bprop_pool(self, layer, I, O, argmax=None, alpha=1.0, beta=0.0):
    """
    Backward propagate pooling layer.

    Arguments:
        layer (PoolLayer): The pool layer object. Different backends have
                           different pool layers.
        I (Tensor): Input (error) tensor.
        O (Tensor): Output (delta) tensor.
        argmax (Tensor): tensor to store location of the maximum
        alpha (float): linear scaling (does not work for l2 pooling)
        beta (float): accumulation value into grad_I
    """
    # note the role reversal vs fprop: I carries errors (output-shaped),
    # O receives deltas (input-shaped)
    assert layer.sizeI == O.size
    assert layer.sizeO == I.size
    if layer.op == "max":
        assert layer.sizeO == argmax.size

    op = layer.op
    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides

    array_E = I._tensor.reshape(layer.dimO)
    array_E[:] = array_E * alpha
    array_delta = O._tensor.reshape(layer.dimI)
    array_delta[:] = array_delta * beta
    if op == "max":
        array_argmax = argmax._tensor.reshape(layer.dimO)

    for k in range(K):
        sliceC, clen = layer.kSlice[k]
        for m in range(M):
            sliceD, dlen = layer.mSlice[m]
            for p in range(P):
                sliceH, hlen = layer.pSlice[p]
                for q in range(Q):
                    sliceW, wlen = layer.qSlice[q]

                    patch_in = (sliceC, sliceD, sliceH, sliceW, slice(None))
                    patch_out = (k, m, p, q, slice(None))
                    sliceB = array_delta[patch_in].reshape((-1, N))
                    if op == "max":
                        # route each error only to the window position
                        # that won the max during fprop
                        max_n = array_argmax[patch_out]
                        sliceB[max_n, list(range(N))] += array_E[patch_out]
                    elif op == "avg":
                        # spread the error uniformly over the window
                        sliceB += array_E[patch_out] * (1.0 / sliceB.shape[0])
                    else:
                        raise NotImplementedError
                    array_delta[patch_in] = sliceB.reshape((clen, dlen, hlen, wlen, N))
def _roipooling_slice(self, h, stride, H, roi_offset):
    """
    Compute the input-map slice covered by one output bin along a single
    dimension for ROI pooling.

    Arguments:
        h (int): index of the bin on the pooled (output) map
        stride (float): input units per output bin
        H (int): extent of the input map, used to clamp the slice
        roi_offset (int): offset of the ROI start from 0 on the input map

    Returns:
        (slice, int): the clamped input slice and its length.
    """
    lo = int(np.floor(float(h) * stride))
    hi = int(np.ceil(float(h + 1) * stride))
    # shift by the ROI origin, then clamp into [0, H]
    lo = min(max(lo + roi_offset, 0), H)
    hi = min(max(hi + roi_offset, 0), H)
    return slice(lo, hi), hi - lo
def roipooling_fprop(self, I, rois, O, argmax, roi_count, C, H, W,
                     pooled_height, pooled_width, spatial_scale):
    """
    Function to perform fprop of ROIPooling.

    Arguments:
        I (Tensor): input feature map, (C, H, W, N)
        rois (Tensor): (ROIs, 5), rows of [image_idx, xmin, ymin, xmax, ymax]
        O (Tensor): (C, pooled_height, pooled_width, roi_count)
        argmax (Tensor): (C, pooled_height, pooled_width, roi_count),
                         flat (h * W + w) index of each pooled maximum
        roi_count (int): number of ROIs
        C, H, W (int): feature map geometry
        pooled_height, pooled_width (int): output bin grid
        spatial_scale (float): scale from ROI coords to feature-map coords
    """
    assert I.size == C * H * W * self.bsz,\
        "ROIPooling input feature map size do not match"
    assert O.size == argmax.size == C * pooled_height * pooled_width * roi_count,\
        "ROIPooling output shape do not match"
    assert rois.shape[1] == 5, "ROIs should be on the row dimension"
    assert rois.shape[0] == roi_count, "ROIs do not match with roi count"

    array_fm = I._tensor.reshape(C, H, W, self.bsz)
    array_rois = rois._tensor
    array_O = O._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_argmax = argmax._tensor.reshape(C, pooled_height, pooled_width, roi_count)
    array_O[:] = 0
    array_argmax[:] = -1

    # combine the feature map with ROIs
    # fix: was `xrange`, which does not exist on Python 3; the rest of
    # this file consistently uses `range`
    for b_id in range(roi_count):
        [idx, xmin, ymin, xmax, ymax] = array_rois[b_id]
        # scale ROI corners into feature-map coordinates
        xmin = int(round(xmin * spatial_scale))
        xmax = int(round(xmax * spatial_scale))
        ymin = int(round(ymin * spatial_scale))
        ymax = int(round(ymax * spatial_scale))
        roi_width = max(xmax - xmin + 1, 1)
        roi_height = max(ymax - ymin + 1, 1)

        stride_h = float(roi_height) / float(pooled_height)
        stride_w = float(roi_width) / float(pooled_width)

        for h_out in range(pooled_height):
            sliceh, lenh = self._roipooling_slice(h_out, stride_h, H, ymin)
            if sliceh.stop <= sliceh.start:
                continue
            for w_out in range(pooled_width):
                slicew, lenw = self._roipooling_slice(w_out, stride_w, W, xmin)
                if slicew.stop <= slicew.start:
                    continue
                else:
                    # max-pool every channel over this bin's input window
                    array_I = array_fm[:, sliceh, slicew, int(idx)].reshape(C, -1)
                    array_O[:, h_out, w_out, b_id] = np.max(array_I, axis=1)

                    # get the max idx respect to feature_maps coordinates
                    max_idx_slice = np.unravel_index(np.argmax(array_I, axis=1), (lenh, lenw))
                    max_idx_slice_h = max_idx_slice[0] + sliceh.start
                    max_idx_slice_w = max_idx_slice[1] + slicew.start
                    max_idx_slice = max_idx_slice_h * W + max_idx_slice_w
                    array_argmax[:, h_out, w_out, b_id] = max_idx_slice
    def roipooling_bprop(self, I, rois, O, argmax, roi_count, C, H, W,
                         pooled_height, pooled_width, spatial_scale):
        """
        Function to perform bprop of ROIPooling.

        Routes each incoming error element back to the single input-map
        location that won the max during fprop, as recorded in argmax.

        Arguments:
            I (Tensor): input errors (C, pooled_height, pooled_width, roi_count)
            argmax (Tensor): max args from the fprop (C, pooled_height, pooled_width, roi_count)
            rois (Tensor): (ROIs, 5), each row is (img_idx, xmin, ymin, xmax, ymax)
            O (Tensor): output deltas (C, H, W, N)
        """
        assert I.size == argmax.size == C * pooled_height * pooled_width * roi_count,\
            "ROIPooling bprop input size do not match"
        assert O.size == C * H * W * self.bsz,\
            "ROIPooling bprop output size do not match"
        assert rois.shape[1] == 5, "ROIs should be on the row dimension"
        assert rois.shape[0] == roi_count, "ROIs do not match with roi count"
        array_E = I._tensor.reshape(C, pooled_height, pooled_width, roi_count)
        array_rois = rois._tensor
        array_delta = O._tensor.reshape(C, H, W, self.bsz)
        array_argmax = argmax._tensor.reshape(C, pooled_height, pooled_width, roi_count)
        array_delta[:] = 0
        for b_id in xrange(roi_count):
            [idx, xmin, ymin, xmax, ymax] = array_rois[b_id]
            # project the ROI from image coordinates onto the feature map
            xmin = int(round(xmin * spatial_scale))
            xmax = int(round(xmax * spatial_scale))
            ymin = int(round(ymin * spatial_scale))
            ymax = int(round(ymax * spatial_scale))
            roi_width = max(xmax - xmin + 1, 1)
            roi_height = max(ymax - ymin + 1, 1)
            stride_h = float(roi_height) / float(pooled_height)
            stride_w = float(roi_width) / float(pooled_width)
            # iterate all the w, h (from feature map) that fall into this ROIs
            for w in range(xmin, xmax + 1):
                for h in range(ymin, ymax + 1):
                    # range of pooled output cells whose window covers (h, w)
                    phstart = int(np.floor(float(h - ymin) / stride_h))
                    phend = int(np.ceil(float(h - ymin + 1) / stride_h))
                    pwstart = int(np.floor(float(w - xmin) / stride_w))
                    pwend = int(np.ceil(float(w - xmin + 1) / stride_w))
                    # clamp the cell range to the pooled output dimensions
                    phstart = min(max(phstart, 0), pooled_height)
                    phend = min(max(phend, 0), pooled_height)
                    pwstart = min(max(pwstart, 0), pooled_width)
                    pwend = min(max(pwend, 0), pooled_width)
                    for ph in range(phstart, phend):
                        for pw in range(pwstart, pwend):
                            max_idx_tmp = array_argmax[:, ph, pw, b_id]
                            for c in range(C):
                                # only propagate error if (h, w) was the argmax
                                if max_idx_tmp[c] == (h * W + w):
                                    array_delta[c, h, w, int(idx)] += array_E[c, ph, pw, b_id]
def nms(self, detections, threshold):
"""
Function to perform non-maximal supression.
Arguments:
detections (Tensor): detection boxes (box_count, 5), each row has
(x1, y1, x2, y2, score). Assume the boxes have already
been sorted based on score in descending order
output_mask (Tensor): pre-allocated buffer for mask output from the kernel
box_count (int): number of boxes
threshold (float): box overlap threshold, boxes with smaller overlaps will be kept
Outputs:
keep_ind (list): list of indices
"""
dets = detections.get()
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= threshold)[0]
order = order[inds + 1]
return keep
    def compound_fprop_bn(self, x, xsum, xvar, gmean, gvar, gamma, beta, y, eps, rho,
                          accumbeta=0.0, relu=False, binary=False):
        """
        Function to perform batch normalization forward pass. Included
        for API compatibility with GPU compound kernel call.

        Arguments:
            x (Tensor): Input from previous layer
            xsum (Tensor): Precomputed batch sum over PQN dimension; divided
                in place below so it ends up holding the batch mean
            xvar (Tensor): Buffer for variance (computed in kernel)
            gmean (Tensor): global mean, updated in place as a running average
            gvar (Tensor): global variance, updated in place as a running average
            gamma (Tensor): scale parameter
            beta (Tensor): location paramter
            y (Tensor): normalized output
            eps (float): constant for numerical stability
            rho (float): exponential window averaging constant
            accumbeta (float): not referenced in this CPU path; kept for GPU API
            relu (bool): not referenced in this CPU path; kept for GPU API
            binary (bool): use power-of-2 shift arithmetic instead of multiply
        """
        xvar[:] = self.var(x, axis=1, binary=binary)
        xsum[:] = xsum / x.shape[1]  # reuse xsum instead of computing xmean
        # fold the batch statistics into the global running averages
        gmean[:] = gmean * rho + (1.0 - rho) * xsum
        gvar[:] = gvar * rho + (1.0 - rho) * xvar
        if binary:
            # shift() multiplies by the nearest signed power of two
            xhat = self.shift(x - xsum, 1.0 / self.sqrt(xvar + eps))
            outputs = y.reshape(xhat.shape)
            outputs[:] = self.shift(xhat, gamma) + beta
        else:
            xhat = (x - xsum) / self.sqrt(xvar + eps)
            outputs = y.reshape(xhat.shape)
            outputs[:] = xhat * gamma + beta
    def compound_bprop_bn(self, delta_out, grad_gamma, grad_beta, delta_in, x, xsum, xvar,
                          gamma, eps, binary=False):
        """
        Function to perform batch normalization backward pass. Included
        for API compatibility with GPU compound kernel call.

        Arguments:
            delta_out (Tensor): Delta buffer to write out to
            grad_gamma (Tensor): Gradient w.r.t. gamma
            grad_beta (Tensor): Gradient w.r.t. beta
            delta_in (Tensor): Delta buffer to read from (incoming errors)
            x (Tensor): feedforward input
            xsum (Tensor): Batch sum over PQN dimension
            xvar (Tensor): Batch variance
            gamma (Tensor): scale parameter
            eps (float): constant for numerical stability
            binary (bool): Binary shift based computations
        """
        if binary:
            # power-of-2 multiply stands in for true multiplication
            op = self.shift
        else:
            def multiply(left, right):
                return left * right
            op = multiply
        inv_v = 1.0 / self.sqrt(xvar + eps)
        # NOTE(review): xsum appears to hold the batch mean here (fprop divides
        # it by N in place) -- confirm callers always run fprop first
        xhat = op(x - xsum, inv_v)
        grad_gamma[:] = self.sum(xhat * delta_in, axis=1)
        grad_beta[:] = self.sum(delta_in, axis=1)
        # per-element correction term, averaged over the batch dimension
        xtmp = (op(xhat, grad_gamma) + grad_beta) / float(x.shape[1])
        delta_out.reshape(delta_in.shape)[:] = op(op(delta_in - xtmp, gamma), inv_v)
    def compound_bprop_lut(self, nin, inputs, error, error_t, dW, pad_idx, alpha=1.0, beta=0):
        """
        Backward propagate lookup table layer.

        For every unique word id in the minibatch (except the padding id),
        sums the error columns at the positions where that word occurred and
        writes the result into the corresponding row of dW.

        Arguments:
            nin (int): Number of input word_ids (not referenced in this CPU path).
            inputs (Tensor): Input tensor; row 0 holds the word ids.
            error (Tensor): Error tensor.
            error_t (Tensor): Transposed error tensor (not referenced in this CPU path).
            dW (Tensor): Gradient tensor (delta).
            pad_idx (int): word id to skip (padding token).
            alpha (float): not referenced in this CPU path.
            beta (float): not referenced in this CPU path.
        """
        wrd_ids = inputs._tensor[0]
        # group the column positions of each distinct word id
        unqidx, inv = np.unique(wrd_ids, return_inverse=True)
        groups = [np.where(inv == i) for i in range(len(unqidx))]
        for (wrd_id, group) in zip(unqidx, groups):
            if wrd_id != pad_idx:
                # overwrite (not accumulate) the row with the summed errors
                dW[wrd_id, :] = self.sum(error.take(group[0], axis=1), axis=1)
        """
        alternative bprop
        for (j, wrd_id) in enumerate(wrd_ids):
            dW[:, wrd_id] = dW[:, wrd_id] + error[:, j]
        """
def _hist_tensor(self, tag):
"""
Create a tensor the right size for histogram data, with memory allocated
in the contiguous histogram buffer. Track it by tag for later reference.
"""
assert self.hist_idx < self.hist_max
self.hist_map[tag] = (self.hist_idx)
hist_buf = self.hist_buf[self.hist_idx]
self.hist_idx += 1
return hist_buf
def dump_hist_data(self):
hist_data = self.hist_buf
hist_map = self.hist_map
self.hist_map = dict()
self.hist_idx = 0
self.hist_buf = self.empty(
(self.hist_max, self.hist_bins), dtype=np.int32)
return hist_data, hist_map
def Relu(self, ary, out=None):
"""
Calculates the ReLu transformation for input array.
Arguments:
ary: numpy array
out: reference to output
"""
if out is not None:
return np.maximum(ary, 0, out)
else:
return np.maximum(ary, 0)
    def binarize(self, ary, out, stochastic=True):
        """
        Binarizes input array, writing values from {-1, +1} into out.

        Arguments:
            ary: tensor
            out: reference to output
            stochastic: when True, sample the sign with probability given by
                the hard sigmoid (ary + 1)/2; when False, threshold
                deterministically at 0
        """
        if stochastic:
            # hard sigmoid -> probability of the element being +1
            out[:] = (ary + 1)/2.0
            self.clip(out, 0, 1, out)
            # compare uniform samples against the probability -> 0/1 mask
            prob = self.array(np.random.uniform(0, 1, size=ary.shape))
            self.less_equal(prob, out, out)
        else:
            self.greater_equal(ary, 0, out)
        out[:] = 2 * out - 1  # map {0, 1} -> {-1, +1}
        return out
    def shift(self, ary, shift_ary, value=True, out=None):
        """
        Shifts input array, i.e. multiplies ary elementwise by a signed
        power of two derived from shift_ary.

        Arguments:
            ary: tensor
            shift_ary: tensor of shift amount
            value: when True, shift_ary holds values that are rounded to
                their nearest signed power of two; when False, shift_ary
                holds the exponents directly (2 ** shift_ary)
            out: reference to output (allocated here when None)
        """
        if value:
            # round |shift_ary| to the nearest power of two, keeping the sign
            exp = self.rint(self.safelog(self.absolute(shift_ary))/self.log(2))
            ap2 = self.multiply(self.sgn(shift_ary), self.exp2(exp))
        else:
            ap2 = self.exp2(shift_ary)
        if out is None:
            if hasattr(ary, 'shape'):
                out = self.empty_like(ary)
            else:
                # scalar input: fall back to a 1x1 tensor
                out = self.empty((1, 1))
        out[:] = self.multiply(ary, ap2)
        return out
def init_mark(self):
"""
Generate a timing mark object.
Returns:
timing mark (dict)
"""
return {'time': 0}
def record_mark(self, marker):
"""
Mark the current time.
Arguments:
marker (time mark): timing mark generated by init_mark()
"""
marker['time'] = time.time()
def synchronize_mark(self, marker):
"""
Synchronize on the given marker.
Arguments:
marker (time mark): timing mark generated by init_mark()
"""
# No-op on cpu
return
def get_time(self, start, end):
"""
Return time between start and end marks.
Arguments:
start (time maker): start time mark
end (time marker): end time mark
Returns:
time elapsed between start and end time marks in milliseconds
"""
return (end['time'] - start['time']) * 1000.0
|
en
| 0.694363
|
# ---------------------------------------------------------------------------- # Copyright 2014-2016 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- Our CPU based backend interface and tensor data structure. Our implementation wraps :mod:`numpy` ndarray and related operations # TODO: enable this flag to find numerical problems # np.seterr(all='raise') The n-dimensional array data structure that resides in host memory, and is meant to be manipulated on the CPU. wrapped `numpy.ndarray` tensor. Arguments: dtype (numpy.ndtype, optional): underlying data type of the elements. ary (data array, optional): optionally it can be instantiated with a data array persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls See also: :class:`NervanaCPU` class # supported dtypes Returns a string representation of this Tensor. Returns: str: the representation. Returns a more unambiguous string representation of the Tensor. Returns: str: the representation. Return the size of the leading dimension of self. Assign the specified value to a subset of elements found via slice style indexing along each dimension. e.g. A[5:10, :] = 4.5. Each slice consists of start_idx:stop_idx:step_size triplets. 
If step_size isn't specified it defaults to 1. If start_idx isn't specified it defaults to 0. If stop_idx isn't specified it defaults to the total number of elements along that dimension. As such a slice value of ':' allows one to select all elements along that dimension. Arguments: key (int, slice, tuple): indices of each dimension's slice. value (numeric array, CPUTensor): values to be assigned to the extracted element subset. If an array it should be the same shape as what key indexes (or be broadcastable as such). Extract a subset view of the items via slice style indexing along each dimension. e.g. A[5:10, :]. Each slice consists of start_idx:stop_idx:step_size triplets. If step_size isn't specified it defaults to 1. If start_idx isn't specified it defaults to 0. If stop_idx isn't specified it defaults to the total number of elements along that dimension. As such a slice value of ':' allows one to select all elements along that dimension. To be consistent with GPU Tensors, CPU Tensors remove the axis that has size 1 unless it needs to maintain 2D. Arguments: key (int, slice, tuple): indices of each dimension's slice. Returns: CPUTensor: view of self corresponding to the subset items. # speed up common case of [:] # ensure we return a view # exact same behavior as cpu # let a.shape = (3,4) # a[1,1] = 10 # cpu, gpu and numpy # type(a[1,1]) # for cpu and gpu type is Tensor; for numpy type is float # return a view of the tensor Assign an input value to the CPU tensor. The NervanaCPU does clipping for int and uint types, when overflow happens Arguments: value (CPUTensor, OpTreeNode, numeric): the value to be assigned. Wrap the value into NervanaCPU tensor. Arguments: value: Array or single input. If it is array, check and Convert the dtype and shape. If it is single value, broadcast to the memory Returns: self Return the array. Access the raw buffer. Returns: pointer: A device specific pointer Deprecated. Scheduled to be removed in 2.0. Use get() instead. 
Select a subset of elements from an array across an axis. Arguments: indices (Tensor, numpy ndarray): indicies of elements to select axis (int): axis across which to select the values Returns: Tensor: Tensor with selected values # if indices are nx1 or 1xn, much of our code assumes these dims are # collapsed, hence the squeeze call. Assign specified value to each element of this CPUTensor. Arguments: value (numeric): The value to be assigned to each element. Return: CPUTensor: updated view of the data. Construct and return a deep copy of the Tensor passed. Arguments: a (Tensor): the object to copy Returns: Tensor: new array object with the same values as input tensor Alias of copy. Arguments: a (Tensor): the object to copy Returns: Tensor: new array object with the same values as input tensor Return a reshaped view. Return a transposed view. For 2D tensor, will do a normal transpose For 3D tensor, will keep the 0 dim, swap the 1 and 2 dimensions # support for batched dot. # perserve outer dimension but reverse inner dims # shape = np.concatenate((shape[-1:], shape[:-1]) Return a transposed view of the data. Alias of .T property Return a view: ary, where ary.size <= self.size. Allows easy sharing of temporary memory This is mostly provided for compatibility, -- dtype is ignored Compute a histogram of the current tensor values. Arguments: tag (string): Tag to identify the current state of the tensor, useful for disambiguating multiple histograms of the same tensor at different points in time. Returns: Tensor containing the histogram data. # def repeat(self, repeats, axis): # return self.__class__( # backend=self.backend, # ary=self._tensor.repeat(repeats, axis)) Calls numpy argmax with keepdims. Calls numpy argmin with keepdims. # assign # zero_operand ops # unary ops # binary ops # reduction ops Sets up a :mod:`numpy` baseyd backend for matrix ops. By default, we use 32-bit element data types for any arrays constructed. 
Attributes: default_dtype (dtype): default element data type. tensor_cls: underlying Tensor type. For CPU backend, it will be CPU tensor See also: :class:`CPUTensor` # Ignored # ensure an optimized BLAS is present and warn if not Generate the random number generator on host. Arguments: seed (int): random number generator seed Returns: seeded numpy RNG Set the RNG state for host RNG. Arguments: state (np.array): numpy random number state vector Return the current state of the on-host RNG. Returns: np.array: the on-host RNG state vectors Reset the random state to the state where the Backend is first initialized. Fill ary with normally distributed random numbers. Arguments: ary (Tensor): Tensor to fill with random values mean (float): Mean value. Default 0 stdv (float): standard deviation value. Default 1 Execute the optree. Break optree into sub-optrees if necessary. Arguments: optree: (OpTreeNode): the OpTreeNode object that represents all the operations # deal with onehot specially for now # get the output buffer # get the output shape and onehot representation length will be on # this axis # ind for indexing # get post order stack # init compute stack # iterate through postfix stack to compute result # TODO add rand and onehot here Instantiate a new instance of the CPUTensor class without initializing individual element values. Arguments: shape (int, list): The size of each dimension of the Tensor. dtype (dtype, optional): Element data type. If not specified we use default_dtype value persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference Instantiate a new instance of the CPUTensor class setting each element value to what is specified in ary. 
Arguments: ary (numpy.ndarray): The data structure containing element values spread across a number of dimensions. Python built-in types like ints and lists are supported. dtype (dtype, optional): Element data type. If not specified we use default_dtype value ('float32' unless overridden). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference Instantiate a new instance of the CPUTensor class setting each element value to 0. Arguments: shape (list of ints): The size of each dimension of the Tensor. dtype (dtype, optional): Element data type. If not specified we use default_dtype value ('float32' unless overridden). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference Instantiate a new instance of the CPUTensor class setting each element value to 1. Arguments: shape (list of ints): The size of each dimension of the Tensor. dtype (dtype, optional): Element data type. If not specified we use default_dtype value ('float32' unless overridden). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference Instantiate a new instance of this backend's Tensor class, with the shape taken from ary. Arguments: ary (tensor object): Tensor to inherit the dimensions of. 
dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object Instantiate a new instance of this backend's Tensor class, with the shape taken from ary and populating each element with a value of 0. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object Doing following operations (* is dot product) C = alpha * A * B + beta * C C = alpha * A.T * B + beta * C C = alpha * A * B.T + beta * C. relu: if true applied before output (and prior to beta addition) The operation will be short-circuited to: out <- alpha * left * right if beta has value 0 (the default). Arguments: A, B (CPUTensor): input operands C (CPUTensor): output alpha (float): scale A*B term beta (float): scale C term before sum relu (bool): whether to apply ReLu before output # checking type and shape # cleaner implementation, shall be equivalent to the one below # if relu: # C[:] = self.log(1. + self.exp(alpha * self.dot(A, B))) + beta * C # else: # C[:] = alpha * self.dot(A, B) + beta * C Doing following operations: 1 For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C) 2 For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C) 3 For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C). 
Arguments: A, B (CPUTensor): input operands C (CPUTensor): output alpha, beta, relu: see usage in dot() # ldaz, ldbz, ldcz = 0, 0, 0 # commented for stylecheck Performs XNOR GEMM C = A * B Arguments: A (Tensor): left-hand side operand. B (Tensor): right-hand side operand. C (Tensor): output operand # checking type and shape Function to perform a fast copy transpose/dimshuffle operation. Works just like numpy.transpose, but requires an output tensor argument. Create a binary mask for dropout layers. Arguments: out (CPUTensor): Output tensor keepthresh (float): fraction of ones Create a new ConvLayer parameter object. This then is passed as an argument to all the convolution operations. N: Number of images in mini-batch C: Number of input feature maps K: Number of output feature maps D: Depth of input image H: Height of input image W: Width of input image T: Depth of filter kernel R: Height of filter kernel S: Width of filter kernel padding: amount of zero-padding around the given edge strides: factor to step the filters by in a given direction dtype: need to know dtype to setup proper kernels and params. bsum: calculate the sum along the batchnorm axis for fprop or bprop outputs an fp32 tensor of size Kx1 Forward propagate the inputs of a convolutional network layer to produce output. 
Arguments: layer: the conv layer as a parameter object I (CPUTensor): inputs F (CPUTensor): the weights (filters) O (CPUTensor): outputs Compounding Options: X: tensor to use in bprop_relu or beta can be same as O for beta accumulate (this is default when None) should be same shape as O bias: (K,1) tensor to use for adding bias to output O += bias bsum: (K,1) tensor to accumulate batch sum over (used in batchnorm or bprop_bias) bsum = sum(O.reshape(K,-1), axis=1) the sum operation is fully deterministic alpha, beta: O = alpha*O + beta*X O = alpha*O + beta*O (if X==O) relu, slope: boolean flag to apply: O = max(O, 0) + beta*min(O, 0) can be combined with bias (where bias is added first) brelu, slope: boolean flag to apply: O *= (X > 0) + beta*(X < 0) can be combined with bsum tensor to output bprop_bias Backward propagate the error through a convolutional network layer. Arguments: layer: the conv layer as a parameter object F (CPUTensor): the weights (filters) E (CPUTensor): errors grad_I (CPUTensor): gradient to inputs (output delta) Compounding Options: X: tensor to use in bprop_relu or beta can be same as grad_I for beta accumulate (this is default when None) should be same shape as grad_I bias: (K,1) tensor to use for adding bias to output grad_I += bias bsum: (K,1) tensor to accumulate batch sum over (used in batchnorm or bprop_bias) bsum = sum(grad_I.reshape(K,-1), axis=1) the sum operation is fully deterministic alpha, beta: grad_I = alpha*grad_I + beta*X grad_I = alpha*grad_I + beta*grad_I (if X==grad_I) relu, slope: boolean flag to apply: grad_I = max(grad_I, 0) + slope*min(grad_I, 0) can be combined with bias (where bias is added first) brelu, slope: boolean flag to apply: grad_I *= (X > 0) + slope*(X < 0) can be combined with bsum tensor to output bprop_bias Compute the updated gradient for a convolutional network layer. 
Arguments: layer: the conv layer as a parameter object I (CPUTensor): the inputs E (CPUTensor): the errors U (CPUTensor): the updates alpha (float): linear scaling beta (float): scaled accumulation Create a new DeconvLayer parameter object. This then is passed as an argument to all the convolution operations. N: Number of images in mini-batch C: Number of output feature maps K: Number of input feature maps P: Height of input Q: Width of input D: Depth of output image H: Height of output image W: Width of output image T: Depth of filter kernel R: Height of filter kernel S: Width of filter kernel padding: amount of zero-padding around the given edge strides: factor to step the filters by in a given direction dtype: need to know dtype to setup proper kernels and params. Create a new PoolLayer parameter object. This then is passed as an argument to all pooling kernels. N: Number of images in mini-batch C: Number of input feature maps H: Height of input image W: Width of input image J: Size of feature map pooling window (maxout n_pieces) padding: amount of zero-padding around the given image or feature map edge strides: factor to step the window by in a given direction (overlap allowed) Leave spatial dimensions at 1 to allow feature map pooling in the fc layers. # Bunch of defaults since we're only interested in the k-axis Forward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object, different backends have different pool layers. I (Tensor): Input tensor. O (Tensor): output tensor. denom (Tensor): denominator tensor, stores the result of the squared pooling/contrast ascale (float): scaling parameter (alpha) to multiply the pooled sum (1.25e-5 in AK) bpower (float): exponential parameter (beta) to raise denominator by (0.75 in AK) # _tensor to write to # although we can calculate directly into O, keeping denom around is useful for bprop # _tensor to write to # elementwise divide by denominator Backward propagate pooling layer. 
Arguments: layer (PoolLayer): The pool layer object. Different backends have different pool layers. I (Tensor): Input tensor. E (Tensor): Error tensor. delta (Tensor): Gradient tensor (delta) denom (Tensor): denominator tensor computed during bprop ascale (float): scaling parameter (alpha) to multiply the pooled sum (1.25e-5 in AK) bpower (float): exponential parameter (beta) to raise denominator by (0.75 in AK) # write to # temporarily store part of the derivative in here Create a new PoolLayer parameter object. This then is passed as an argument to all pooling kernels. op: "max", "avg", "l2" pooling (currently bprop only supports max, but not avg and l2) N: Number of images in mini-batch C: Number of input feature maps D: Depth of input image H: Height of input image W: Width of input image J: Size of feature map pooling window (maxout n_pieces) T: Depth of pooling window R: Height of pooling window S: Width of pooling window padding: amount of zero-padding around the given image or feature map edge strides: factor to step the window by in a given direction (overlap allowed) Leave spatial dimensions at 1 to allow feature map pooling in the fc layers. # default to non-overlapping Forward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object, different backends have different pool layers. I (Tensor): Input tensor. O (Tensor): output tensor. argmax (Tensor): tensor to store location of the maximum Backward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object. Different backends have different pool layers. I (Tensor): Input (error) tensor. O (Tensor): Output (delta) tensor. argmax (Tensor): tensor to store location of the maximum alpha (float): linear scaling (does not work for l2 pooling) beta (float): accumulation value into grad_I Slicing for ROIPooling along one dimension. 
h: is the index on the pooled map (output index) stride: H: the max of the input map roi_offset: how far hstart is from 0 Function to perform fprop of ROIPooling Arguments: I (Tensor): (C, H, W, N) rois (Tensor): (ROIs, 5) O (Tensor): (C, pooled_height, pooled_width, roi_count) argmax (Tensor): (C, pooled_height, pooled_width, roi_count) # combine the feature map with ROIs # get the max idx respect to feature_maps coordinates Function to perform bprop of ROIPooling. Arguments: I (Tensor): input errors (C, pooled_height, pooled_width, roi_count) argmax (Tensor): max args from the fprp (C, pooled_height, pooled_width, roi_count) rois (Tensor): (ROIs, 5) O (Tensor): output deltas (C, H, W, N) # iterate all the w, h (from feature map) that fall into this ROIs Function to perform non-maximal supression. Arguments: detections (Tensor): detection boxes (box_count, 5), each row has (x1, y1, x2, y2, score). Assume the boxes have already been sorted based on score in descending order output_mask (Tensor): pre-allocated buffer for mask output from the kernel box_count (int): number of boxes threshold (float): box overlap threshold, boxes with smaller overlaps will be kept Outputs: keep_ind (list): list of indices Function to perform batch normalization forward pass. Included for API compatibility with GPU compound kernel call. Arguments: x (Tensor): Input from previous layer xsum (Tensor): Precomputed batch sum over PQN dimension xvar (Tensor): Buffer for variance (computed in kernel) gmean (Tensor): global mean () gvar (Tensor): global variance gamma (Tensor): scale parameter beta (Tensor): location paramter y (Tensor): normalized output eps (float): constant for numerical stability rho (float): exponential window averaging constant # reuse xsum instead of computing xmean Function to perform batch normalization backward pass. Included for API compatibility with GPU compound kernel call. 
Arguments: delta_out (Tensor): Delta buffer to write out to grad_gamma (Tensor): Gradient w.r.t. gamma grad_beta (Tensor): Gradient w.r.t. beta delta_in (Tensor): Delta buffer to read from (incoming errors) x (Tensor): feedforward input xsum (Tensor): Batch sum over PQN dimension xvar (Tensor): Batch variance gamma (Tensor): scale parameter eps (float): constant for numerical stability binary (bool): Binary shift based computations Backward propagate lookup table layer. Arguments: nin (int): Number of input word_ids. inputs (Tensor): Input tensor. error (Tensor): Error tensor. error_t (Tensor): Transposed error tensor. dW (Tensor): Gradient tensor (delta). pad_idx (int): alpha (float): beta (float): alternative bprop for (j, wrd_id) in enumerate(wrd_ids): dW[:, wrd_id] = dW[:, wrd_id] + error[:, j] Create a tensor the right size for histogram data, with memory allocated in the contiguous histogram buffer. Track it by tag for later reference. Calculates the ReLu transformation for input array. Arguments: ary: numpy array out: reference to output Binarizes input array Arguments: ary: tensor out: reference to output stochastic: stochastic or deterministic Shifts input array Arguments: ary: tensor shift_ary: tensor of shift amount out: reference to output Generate a timing mark object. Returns: timing mark (dict) Mark the current time. Arguments: marker (time mark): timing mark generated by init_mark() Synchronize on the given marker. Arguments: marker (time mark): timing mark generated by init_mark() # No-op on cpu Return time between start and end marks. Arguments: start (time maker): start time mark end (time marker): end time mark Returns: time elapsed between start and end time marks in milliseconds
| 1.822438
| 2
|
config/settings/base.py
|
Billykat7/btkweathers
| 0
|
6625941
|
import os
import configparser
from pathlib import Path
from decouple import config

# Project root: three directory levels up from this file
# (<root>/config/settings/base.py -> <root>).
BASE_DIR = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.abspath(__file__)
        )
    )
)

# Secrets are read from the environment / .env via python-decouple so they
# never live in source control.
SECRET_KEY = config('SECRET_KEY')
OPENWEATHER_API_KEY = config('OPENWEATHER_API_KEY')

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # API
    'rest_framework',
    # 3rd party Apps
    'crispy_forms',
    # Project's Apps
    'weather.apps.web.climate',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'weather/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'
# DATABASE_ROUTERS = ['config.router.AuthRouter']

# Extra project configuration lives in config/app.ini.
CONFIG_DIR = os.path.join(BASE_DIR, 'config/')
parser = configparser.ConfigParser()
# BUG FIX: the ini file handle was opened inline and never closed; a context
# manager releases it deterministically.
with open(os.path.join(CONFIG_DIR, 'app.ini')) as _ini_file:
    parser.read_file(_ini_file)

# DATABASES is expected to be populated by environment-specific settings
# modules that import this base module.
DATABASES = {}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
BASE_PATH = os.path.join(BASE_DIR)
APP_STATIC = 'weather/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_PATH, APP_STATIC)

# NOTE(review): APP_STATIC ends with '/' so the f-string below yields a double
# slash ('weather/static//media'); harmless on POSIX but worth normalizing.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_PATH, f'{APP_STATIC}/media')
|
import os
import configparser
from pathlib import Path
from decouple import config

# Project root: three directory levels up from this file
# (<root>/config/settings/base.py -> <root>).
BASE_DIR = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.abspath(__file__)
        )
    )
)

# Secrets are read from the environment / .env via python-decouple so they
# never live in source control.
SECRET_KEY = config('SECRET_KEY')
OPENWEATHER_API_KEY = config('OPENWEATHER_API_KEY')

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # API
    'rest_framework',
    # 3rd party Apps
    'crispy_forms',
    # Project's Apps
    'weather.apps.web.climate',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'weather/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'
# DATABASE_ROUTERS = ['config.router.AuthRouter']

# Extra project configuration lives in config/app.ini.
CONFIG_DIR = os.path.join(BASE_DIR, 'config/')
parser = configparser.ConfigParser()
# BUG FIX: the ini file handle was opened inline and never closed; a context
# manager releases it deterministically.
with open(os.path.join(CONFIG_DIR, 'app.ini')) as _ini_file:
    parser.read_file(_ini_file)

# DATABASES is expected to be populated by environment-specific settings
# modules that import this base module.
DATABASES = {}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Johannesburg'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
BASE_PATH = os.path.join(BASE_DIR)
APP_STATIC = 'weather/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_PATH, APP_STATIC)

# NOTE(review): APP_STATIC ends with '/' so the f-string below yields a double
# slash ('weather/static//media'); harmless on POSIX but worth normalizing.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_PATH, f'{APP_STATIC}/media')
|
en
| 0.620971
|
# Application definition # API # 3rd party Apps # Project's Apps # DATABASE_ROUTERS = ['config.router.AuthRouter'] # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ #
| 1.75915
| 2
|
scripts/speechToText.py
|
UNICEF-AI-For-Child-Online-Violence/AI-for-child-online-violence
| 3
|
6625942
|
from google.cloud import speech
import os
import io
import libs.Constants
import wave
import contextlib
# Module configuration: libs.Constants supplies the script path and the
# Google service-account credentials location.
# NOTE(review): reassigning __file__ is unusual — presumably so relative
# paths resolve against a fixed location; confirm it is needed.
__file__ = libs.Constants.FILE_PATH
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = libs.Constants.GOOGLE_APPLICATION_CREDENTIALS
def frame_rate_channel(audio_file_name):
    """Return (frame_rate, channels, duration_seconds) for a WAV file."""
    print(audio_file_name)
    with wave.open(audio_file_name, "rb") as wav:
        rate = wav.getframerate()
        n_channels = wav.getnchannels()
        seconds = wav.getnframes() / float(rate)
    return rate, n_channels, seconds
def short_audio(audio_path, channels):
    """Synchronously transcribe a WAV file (must be under ~60 s of audio).

    Prints each transcript returned by the Google Cloud Speech API.
    Credentials come from GOOGLE_APPLICATION_CREDENTIALS set at module import.
    """
    # Creates the Google Cloud Speech client.
    client = speech.SpeechClient()
    # Loads the audio file into memory (content-based, not GCS-based, request).
    with io.open(audio_path, "rb") as audio_file:
        content = audio_file.read()
    audio = speech.RecognitionAudio(content=content)
    # LINEAR16 matches uncompressed PCM WAV; channel count comes from the
    # caller (read from the file header by frame_rate_channel).
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        audio_channel_count=channels,
        language_code="en-US",
    )
    # Sends the synchronous recognize request to Google.
    response = client.recognize(request={"config": config, "audio": audio})
    # Each result covers a consecutive portion of the audio; alternative 0
    # is the most likely transcription.
    for result in response.results:
        print("Transcript: {}".format(result.alternatives[0].transcript))
def long_audio(audio_path, channels):
    """Transcribe the given audio file asynchronously.

    Uses the long-running recognize API and blocks up to 180 s for the
    operation, then prints each transcript with its confidence.
    """
    client = speech.SpeechClient()
    with io.open(audio_path, "rb") as audio_file:
        content = audio_file.read()
    # NOTE(review): the bare string below is a no-op statement kept as
    # in-code documentation by the original author.
    """
    Note that transcription is limited to a 60 seconds audio file.
    Use a GCS (Google Cloud Storage) file for audio longer than 1 minute.
    """
    audio = speech.RecognitionAudio(content=content)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        audio_channel_count=channels,
        # sample_rate_hertz=16000,
        language_code="en-US",
    )
    operation = client.long_running_recognize(config=config, audio=audio)
    print("Waiting for operation to complete...")
    # Blocks until the server-side operation finishes (or 180 s elapse).
    response = operation.result(timeout=180)
    # Each result is for a consecutive portion of the audio. Iterate through
    # them to get the transcripts for the entire audio file.
    for result in response.results:
        # The first alternative is the most likely one for this portion.
        print(u"Transcript: {}".format(result.alternatives[0].transcript))
        print("Confidence: {}".format(result.alternatives[0].confidence))
def main():
    """Prompt for a WAV path and transcribe it.

    Files shorter than 60 s use the synchronous API; longer files use the
    asynchronous (long-running) API. Invalid or non-WAV files are reported
    instead of raising.
    """
    audio_path = input("\nPlease enter the path to the audio you wish to test: ")
    print("Path entered: ", audio_path)
    try:
        # Read frame rate / channels / duration from the WAV header.
        frame_rate, channels, duration = frame_rate_channel(audio_path)
    except (OSError, wave.Error, EOFError):
        # BUG FIX: the old "Invalid Audio File" message sat in an unreachable
        # else branch (duration < 60 / duration >= 60 is exhaustive) while a
        # bad path or non-WAV file raised an uncaught exception here.
        print("Invalid Audio File, please check that the file is a .wav file and try again.")
        return
    if duration < 60:
        print("short", duration)
        short_audio(audio_path, channels)
    else:
        print("long", duration)
        long_audio(audio_path, channels)
if __name__ == '__main__':
    main()
|
from google.cloud import speech
import os
import io
import libs.Constants
import wave
import contextlib
# Module configuration: libs.Constants supplies the script path and the
# Google service-account credentials location.
# NOTE(review): reassigning __file__ is unusual — presumably so relative
# paths resolve against a fixed location; confirm it is needed.
__file__ = libs.Constants.FILE_PATH
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = libs.Constants.GOOGLE_APPLICATION_CREDENTIALS
def frame_rate_channel(audio_file_name):
    """Return (frame_rate, channels, duration_seconds) for a WAV file."""
    print(audio_file_name)
    with wave.open(audio_file_name, "rb") as wav:
        rate = wav.getframerate()
        n_channels = wav.getnchannels()
        seconds = wav.getnframes() / float(rate)
    return rate, n_channels, seconds
def short_audio(audio_path, channels):
    """Synchronously transcribe a WAV file (must be under ~60 s of audio).

    Prints each transcript returned by the Google Cloud Speech API.
    Credentials come from GOOGLE_APPLICATION_CREDENTIALS set at module import.
    """
    # Creates the Google Cloud Speech client.
    client = speech.SpeechClient()
    # Loads the audio file into memory (content-based, not GCS-based, request).
    with io.open(audio_path, "rb") as audio_file:
        content = audio_file.read()
    audio = speech.RecognitionAudio(content=content)
    # LINEAR16 matches uncompressed PCM WAV; channel count comes from the
    # caller (read from the file header by frame_rate_channel).
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        audio_channel_count=channels,
        language_code="en-US",
    )
    # Sends the synchronous recognize request to Google.
    response = client.recognize(request={"config": config, "audio": audio})
    # Each result covers a consecutive portion of the audio; alternative 0
    # is the most likely transcription.
    for result in response.results:
        print("Transcript: {}".format(result.alternatives[0].transcript))
def long_audio(audio_path, channels):
    """Transcribe the given audio file asynchronously.

    Uses the long-running recognize API and blocks up to 180 s for the
    operation, then prints each transcript with its confidence.
    """
    client = speech.SpeechClient()
    with io.open(audio_path, "rb") as audio_file:
        content = audio_file.read()
    # NOTE(review): the bare string below is a no-op statement kept as
    # in-code documentation by the original author.
    """
    Note that transcription is limited to a 60 seconds audio file.
    Use a GCS (Google Cloud Storage) file for audio longer than 1 minute.
    """
    audio = speech.RecognitionAudio(content=content)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        audio_channel_count=channels,
        # sample_rate_hertz=16000,
        language_code="en-US",
    )
    operation = client.long_running_recognize(config=config, audio=audio)
    print("Waiting for operation to complete...")
    # Blocks until the server-side operation finishes (or 180 s elapse).
    response = operation.result(timeout=180)
    # Each result is for a consecutive portion of the audio. Iterate through
    # them to get the transcripts for the entire audio file.
    for result in response.results:
        # The first alternative is the most likely one for this portion.
        print(u"Transcript: {}".format(result.alternatives[0].transcript))
        print("Confidence: {}".format(result.alternatives[0].confidence))
def main():
    """Prompt for a WAV path and transcribe it.

    Files shorter than 60 s use the synchronous API; longer files use the
    asynchronous (long-running) API. Invalid or non-WAV files are reported
    instead of raising.
    """
    audio_path = input("\nPlease enter the path to the audio you wish to test: ")
    print("Path entered: ", audio_path)
    try:
        # Read frame rate / channels / duration from the WAV header.
        frame_rate, channels, duration = frame_rate_channel(audio_path)
    except (OSError, wave.Error, EOFError):
        # BUG FIX: the old "Invalid Audio File" message sat in an unreachable
        # else branch (duration < 60 / duration >= 60 is exhaustive) while a
        # bad path or non-WAV file raised an uncaught exception here.
        print("Invalid Audio File, please check that the file is a .wav file and try again.")
        return
    if duration < 60:
        print("short", duration)
        short_audio(audio_path, channels)
    else:
        print("long", duration)
        long_audio(audio_path, channels)
if __name__ == '__main__':
    main()
|
en
| 0.857668
|
# Creates google client # Full path of the audio file, Replace with your file name # file_name = os.path.join(os.path.dirname(__file__),"test.wav") #Loads the audio file into memory # Sends the request to google to transcribe the audio # Reads the response Transcribe the given audio file asynchronously. Note that transcription is limited to a 60 seconds audio file. Use a GCS (Google Cloud Storage) file for audio longer than 1 minute. # sample_rate_hertz=16000, # Each result is for a consecutive portion of the audio. Iterate through # them to get the transcripts for the entire audio file. # The first alternative is the most likely one for this portion. # Dynamically get the frame rate and channels from audio file
| 3.192695
| 3
|
setup.py
|
elParaguayo/qtile-widget-unitstatus
| 2
|
6625943
|
from setuptools import setup
# Packaging metadata for the UnitStatus qtile widget.
setup(
    name='qtile-widget-unitstatus',
    packages=['unitstatus'],
    version='0.1.0',
    description='A widget to show status of systemd unit',
    author='elParaguayo',
    url='https://github.com/elparaguayo/qtile-widget-unitstatus',
    license='MIT',
    # pydbus is used to talk to systemd over D-Bus.
    install_requires=['qtile>0.14.2', 'pydbus']
)
|
from setuptools import setup
# Packaging metadata for the UnitStatus qtile widget.
setup(
    name='qtile-widget-unitstatus',
    packages=['unitstatus'],
    version='0.1.0',
    description='A widget to show status of systemd unit',
    author='elParaguayo',
    url='https://github.com/elparaguayo/qtile-widget-unitstatus',
    license='MIT',
    # pydbus is used to talk to systemd over D-Bus.
    install_requires=['qtile>0.14.2', 'pydbus']
)
|
none
| 1
| 1.383459
| 1
|
|
csv_utils.py
|
zionchao/AutoPublish
| 3
|
6625944
|
# @author zhangchao
# @date 2020/5/30 14:18
import csv
def read_contents(file_path, codec="utf-8"):
    """Read a CSV file and return every row after the header as a list of lists."""
    with open(file_path, newline='\n', encoding=codec) as csv_file:
        reader = csv.reader(csv_file)
        next(reader, None)  # skip the header row
        return list(reader)
def update_item(file_path, item_list):
    """Rewrite ``file_path`` as a CSV with a URL/STATUS header plus item rows.

    The file is truncated (mode 'w+') and rewritten from scratch;
    ``item_list`` is an iterable of row sequences.
    """
    head = ['URL', 'STATUS']
    with open(file_path, 'w+', encoding='utf-8', newline='') as f:
        csv_writer = csv.writer(f)
        # Dead-code fix: the old always-True `write_head` flag and trailing
        # `pass` are removed; the header is always written.
        csv_writer.writerow(head)
        csv_writer.writerows(item_list)
if __name__ == '__main__':
    # Ad-hoc manual check: dump the GBK-encoded comments file to stdout.
    print(read_contents("comments.csv",codec="gbk"))
    pass
|
# @author zhangchao
# @date 2020/5/30 14:18
import csv
def read_contents(file_path, codec="utf-8"):
    """Read a CSV file and return every row after the header as a list of lists."""
    with open(file_path, newline='\n', encoding=codec) as csv_file:
        reader = csv.reader(csv_file)
        next(reader, None)  # skip the header row
        return list(reader)
def update_item(file_path, item_list):
    """Rewrite ``file_path`` as a CSV with a URL/STATUS header plus item rows.

    The file is truncated (mode 'w+') and rewritten from scratch;
    ``item_list`` is an iterable of row sequences.
    """
    head = ['URL', 'STATUS']
    with open(file_path, 'w+', encoding='utf-8', newline='') as f:
        csv_writer = csv.writer(f)
        # Dead-code fix: the old always-True `write_head` flag and trailing
        # `pass` are removed; the header is always written.
        csv_writer.writerow(head)
        csv_writer.writerows(item_list)
if __name__ == '__main__':
    # Ad-hoc manual check: dump the GBK-encoded comments file to stdout.
    print(read_contents("comments.csv",codec="gbk"))
    pass
|
zh
| 0.781692
|
# @author zhangchao # @date 2020/5/30 14:18 # 1. 创建文件对象 # 2. 基于文件对象构建 csv写入对象 # 3. 构建列表头 # 4. 写入csv文件内容
| 3.086559
| 3
|
raspberryPi/stream_client.py
|
albert0329/Final_project
| 3,417
|
6625945
|
<reponame>albert0329/Final_project<gh_stars>1000+
import io
import socket
import struct
import time
import picamera
# Connect to the receiving server and wrap the socket in a buffered
# file-like object for streaming writes.
# NOTE(review): server address/port are hard-coded — confirm against the
# matching server script.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.1.100', 8000))
connection = client_socket.makefile('wb')
try:
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)  # pi camera resolution
        camera.framerate = 15  # 15 frames/sec
        time.sleep(2)  # give 2 secs for camera to initialize
        start = time.time()
        stream = io.BytesIO()
        # Send a JPEG video stream: each frame is preceded by its byte
        # length packed as a little-endian unsigned 32-bit int.
        for foo in camera.capture_continuous(stream, 'jpeg', use_video_port = True):
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            # Stop streaming after 600 seconds (10 minutes).
            if time.time() - start > 600:
                break
            # Rewind and truncate the in-memory buffer for the next frame.
            stream.seek(0)
            stream.truncate()
    # A zero length tells the server the stream has ended.
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
|
import io
import socket
import struct
import time
import picamera
# Connect to the receiving server and wrap the socket in a buffered
# file-like object for streaming writes.
# NOTE(review): server address/port are hard-coded — confirm against the
# matching server script.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.1.100', 8000))
connection = client_socket.makefile('wb')
try:
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)  # pi camera resolution
        camera.framerate = 15  # 15 frames/sec
        time.sleep(2)  # give 2 secs for camera to initialize
        start = time.time()
        stream = io.BytesIO()
        # Send a JPEG video stream: each frame is preceded by its byte
        # length packed as a little-endian unsigned 32-bit int.
        for foo in camera.capture_continuous(stream, 'jpeg', use_video_port = True):
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            # Stop streaming after 600 seconds (10 minutes).
            if time.time() - start > 600:
                break
            # Rewind and truncate the in-memory buffer for the next frame.
            stream.seek(0)
            stream.truncate()
    # A zero length tells the server the stream has ended.
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
|
en
| 0.701981
|
# create socket and bind host # pi camera resolution # 15 frames/sec # give 2 secs for camera to initilize # send jpeg format video stream
| 2.748872
| 3
|
css/models.py
|
jasonsawatzky/CSS
| 0
|
6625946
|
<reponame>jasonsawatzky/CSS
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.conf import settings
import MySQLdb
import re
from django.db import IntegrityError
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from util import DepartmentSettings
from settings import DEPARTMENT_SETTINGS
import json
import operator
from django.db.models import Q
from django.http import JsonResponse
# System User class,
# Wrapper for django builtin class, contains user + application specific data
class CUser(models.Model):
    """Application user wrapping Django's builtin auth User.

    ``user_type`` is either ``'scheduler'`` or ``'faculty'``; faculty users
    get an associated FacultyDetails row on creation.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    user_type = models.CharField(max_length=16)

    @staticmethod
    def validate_email(email):
        """Return the email if it looks like a valid address, else raise."""
        if re.match(r'^[A-Za-z0-9\._%+\-]+@[A-Za-z0-9\.\-]+\.[A-Za-z]{2,}$', email) is None:
            # BUG FIX: the old concatenated message was missing a space
            # ("creationwith").
            raise ValidationError("Attempted CUser creation with invalid email address")
        return email

    # Password must be 8-32 chars with at least 1 letter, 1 digit and
    # 1 special char.
    @staticmethod
    def validate_password(password):
        if re.match(r'^(?=.*\d)(?=.*[A-Za-z])(?=.*[-._!@#$%^&*?+])[A-Za-z0-9-._!@#$%^&*?+]{8,32}$', password) is None:
            raise ValidationError("Attempted CUser creation with invalid password")
        return password

    @staticmethod
    def validate_user_type(user_type):
        """Only 'scheduler' and 'faculty' are valid user types."""
        if user_type != 'scheduler' and user_type != 'faculty':
            raise ValidationError("Attempted CUser creation with invalid user_type")
        return user_type

    @staticmethod
    def validate_first_name(first_name):
        if first_name and len(first_name) > 30:
            raise ValidationError("Attempted CUser creation with a first_name longer than 30 characters")
        return first_name

    @staticmethod
    def validate_last_name(last_name):
        if last_name and len(last_name) > 30:
            raise ValidationError("Attempted CUser creation with a last_name longer than 30 characters")
        return last_name

    @classmethod
    def validate_name(cls, first_name, last_name):
        """Length-check both names and reject duplicate full names."""
        if first_name and len(first_name) > 30:
            raise ValidationError("Attempted CUser creation with a first_name longer than 30 characters")
        if last_name and len(last_name) > 30:
            raise ValidationError("Attempted CUser creation with a last_name longer than 30 characters")
        if CUser.objects.filter(user__first_name=first_name, user__last_name=last_name).exists():
            raise ValidationError("Attempted CUser creation with duplicate full name.")

    @classmethod
    def create(cls, email, password, user_type, first_name, last_name):
        """Validate all fields, create the wrapped User, and save the CUser.

        Faculty users also get a FacultyDetails row with target hours/units 0.
        Raises ValidationError on any invalid field.
        """
        cls.validate_name(first_name, last_name)
        # BUG FIX: the password kwarg had been corrupted to a
        # '<PASSWORD>' redaction placeholder; restore the validator call.
        # (The old no-op `try/except: raise` wrapper is also removed.)
        user = cls(user=User.objects.create_user(username=cls.validate_email(email),
                                                 email=cls.validate_email(email),
                                                 password=cls.validate_password(password),
                                                 first_name=cls.validate_first_name(first_name),
                                                 last_name=cls.validate_last_name(last_name)),
                   user_type=cls.validate_user_type(user_type))
        user.save()
        if user_type == 'faculty':
            FacultyDetails.create(user, 0, 0).save()
        return user

    # Return cuser by email
    @classmethod
    def get_user(cls, email):  # Throws ObjectDoesNotExist
        return cls.objects.get(user__username=email)

    # Return cuser by full name ("First Last")
    @classmethod
    def get_cuser_by_full_name(cls, full_name):
        # BUG FIX: this was missing @classmethod and contained a Python 2
        # `print` statement that was a syntax error under Python 3.
        first_name, last_name = full_name.split()[0], full_name.split()[1]
        return cls.objects.get(user__first_name=first_name,
                               user__last_name=last_name)

    # Return faculty cuser by email
    @classmethod
    def get_faculty(cls, email):  # Throws ObjectDoesNotExist
        return cls.objects.get(user__username=email, user_type='faculty')

    # Return all faculty cusers
    @classmethod
    def get_all_faculty(cls):
        return cls.objects.filter(user_type='faculty')

    # Return every faculty member's "First Last" name
    @classmethod
    def get_all_faculty_full_name(cls):
        faculty_list = cls.objects.filter(user_type='faculty')
        names_list = []
        for faculty in faculty_list:
            names_list.append('{0} {1}'.format(faculty.user.first_name, faculty.user.last_name))
        return names_list

    # Return scheduler cuser by email
    @classmethod
    def get_scheduler(cls, email):  # Throws ObjectDoesNotExist
        return cls.objects.get(user__username=email, user_type='scheduler')

    # Return all scheduler cusers
    @classmethod
    def get_all_schedulers(cls):
        return cls.objects.filter(user_type='scheduler')

    # BUG FIX: the four accessors below were decorated @classmethod while
    # reading/writing instance state (`self`), so every call failed; they
    # are instance methods and the setters must persist on the wrapped
    # auth User (CUser itself has no name/password fields).
    def get_email(self):
        return self.user.username

    def set_first_name(self, first_name):
        self.user.first_name = first_name
        self.user.save()

    def set_last_name(self, last_name):
        self.user.last_name = last_name
        self.user.save()

    def set_password(self, pword):
        # Hash via Django's set_password rather than assigning a raw value
        # to a nonexistent field.
        self.user.set_password(pword)
        self.user.save()

    def to_json(self):
        # BUG FIX: first and last name were concatenated with no separator.
        return dict(id=self.id,
                    name=self.user.first_name + ' ' + self.user.last_name,
                    email=self.user.email)
class FacultyDetails(models.Model):
    """Per-faculty scheduling preferences, attached 1:1 to a faculty CUser."""
    # The faculty link cascades: deleting the CUser deletes this row too.
    faculty = models.OneToOneField(CUser, on_delete=models.CASCADE)
    target_work_units = models.IntegerField(default=0, null=True)  # in units
    target_work_hours = models.IntegerField(default=0, null=True)  # in hours
    changed_preferences = models.CharField(max_length=1)  # 'y' or 'n'

    @classmethod
    def create(cls, faculty, target_work_units, target_work_hours):
        # New details start as unedited (changed_preferences == 'n').
        faculty = cls(faculty=faculty, target_work_units=target_work_units,
                      target_work_hours=target_work_hours, changed_preferences='n')
        faculty.save()
        return faculty

    def change_details(self, new_work_units=None, new_work_hours=None):
        # Update whichever targets were provided and flag that preferences
        # changed.
        # NOTE(review): this mutates the instance but never calls save() —
        # nothing is persisted unless the caller saves; confirm intended.
        # NOTE(review): a falsy new value (0) is ignored by these truthiness
        # checks — confirm whether 0 should be settable.
        if new_work_units:
            self.target_work_units = new_work_units
        if new_work_hours:
            self.target_work_hours = new_work_hours
        self.changed_preferences = 'y'
# @TODO Function to yes changed_preferences to 'n'? Also consider naming it something
# more indicative -> preferences_have_changed? has_changed_preferences? etc.
# ---------- Resource Models ----------
# Room represents department rooms
class Room(models.Model):
    """A department room available for scheduling sections."""
    name = models.CharField(max_length=32, unique=True)
    description = models.CharField(max_length=256, null=True)
    capacity = models.IntegerField(default=0)
    notes = models.CharField(max_length=1024, null=True)
    equipment = models.CharField(max_length=1024, null=True)

    @classmethod
    def create(cls, name, description, capacity, notes, equipment):
        """Validate field lengths against the model's max_length, then save.

        Raises ValidationError on a missing name or any over-long field.
        """
        if name is None:
            raise ValidationError("Room name is required")
        elif len(name) > 32:
            raise ValidationError("Room name is longer than 32 characters")
        elif description and len(description) > 256:
            raise ValidationError("Room description is longer than 256 characters")
        elif notes and len(notes) > 1024:
            raise ValidationError("Room notes is longer than 1024 characters")
        elif equipment and len(equipment) > 1024:
            # BUG FIX: the limit was checked against 256 while both the
            # field (max_length=1024) and the message say 1024.
            raise ValidationError("Room equipment is longer than 1024 characters")
        else:
            room = cls(name=name,
                       description=description,
                       capacity=capacity,
                       notes=notes,
                       equipment=equipment)
            room.save()
            return room

    @classmethod
    def get_room(cls, name):
        """Return the room with this name (raises ObjectDoesNotExist)."""
        return Room.objects.get(name=name)

    @classmethod
    def get_all_rooms(cls):
        """Return a queryset of all rooms."""
        return cls.objects.filter()

    def to_json(self):
        return dict(id=self.id,
                    name=self.name,
                    description=self.description,
                    capacity=self.capacity,
                    notes=self.notes,
                    equipment=self.equipment)
# Course represents a department course offering
class Course(models.Model):
    """A department course offering."""
    name = models.CharField(max_length=16, unique=True)
    equipment_req = models.CharField(max_length=2048, null=True)
    description = models.CharField(max_length=2048, null=True)

    @classmethod
    def create(cls, name, equipment_req, description):
        """Validate field lengths, then create and save the course."""
        if len(name) > 16:
            raise ValidationError("Name is longer than 16 characters, making it invalid.")
        # Both fields are nullable: only length-check when a value is given
        # (the old code raised TypeError on None).
        if equipment_req and len(equipment_req) > 2048:
            # BUG FIX: this message previously said "Description".
            raise ValidationError("Equipment requirements are longer than 2048 characters, making it invalid.")
        if description and len(description) > 2048:
            raise ValidationError("Description is longer than 2048 characters, making it invalid.")
        course = cls(name=name,
                     equipment_req=equipment_req,
                     description=description)
        course.save()
        return course

    @classmethod
    def get_all_courses(cls):
        """Return a queryset of all courses."""
        return cls.objects.filter()

    @classmethod
    def get_course(cls, name):
        """Return the course with this name (raises ObjectDoesNotExist)."""
        return cls.objects.get(name=name)

    def to_json(self):
        return dict(id=self.id,
                    name=self.name,
                    equipment_req=self.equipment_req,
                    description=self.description)

    def set_equipment_req(self, equipment_req):
        """Set and persist the equipment requirements."""
        self.equipment_req = equipment_req
        self.save()

    def set_description(self, description):
        """Set and persist the description."""
        self.description = description
        self.save()

    def get_all_section_types(self):
        """All WorkInfo rows (section type + workload) for this course."""
        return WorkInfo.objects.filter(course=self)

    def get_section_type(self, section_type_name):
        """WorkInfo for one named section type.

        Throws ObjectDoesNotExist / MultipleObjectsReturned.
        """
        section_type = SectionType.get_section_type(section_type_name)
        return WorkInfo.objects.get(course=self, section_type=section_type)

    def add_section_type(self, section_type_name, work_units, work_hours):
        """Associate a new section type with this course (throws ObjectDoesNotExist)."""
        section_type = SectionType.get_section_type(section_type_name)
        WorkInfo.create(self, section_type, work_units, work_hours)

    def remove_section_type(self, section_type_name):
        """Delete the association between this course and a section type."""
        # (Commented-out leftover code removed.)
        self.get_section_type(section_type_name).delete()

    def get_all_section_types_JSON(self):
        """Return this course's section types as a JsonResponse keyed by index."""
        course_section_types = self.get_all_section_types()
        print("Found " + str(course_section_types.count()) + " course section types")
        section_types = {}
        for i, section_type in enumerate(course_section_types):
            print(section_type.section_type.name)
            section_types[i] = {
                'course_name': section_type.course.name,
                'section_type_name': section_type.section_type.name,
                'work_units': section_type.work_units,
                'work_hours': section_type.work_hours
            }
        return JsonResponse(section_types)
class SectionType(models.Model):
    """A kind of course section, e.g. lecture or lab."""
    name = models.CharField(max_length=32, unique=True)  # eg. lecture or lab

    @classmethod
    def create(cls, name):
        if len(name) > 32:
            raise ValidationError("Section Type name exceeds 32 characters.")
        section_type = cls(name=name)
        section_type.save()
        return section_type

    @classmethod
    def get_section_type(cls, name):
        # BUG FIX: removed leftover debug output (print("CHECK")).
        # NOTE(review): filter(...)[0] raises IndexError — not the
        # ObjectDoesNotExist that sibling getters raise — when the name is
        # missing; confirm which exception callers expect before switching
        # to objects.get().
        return cls.objects.filter(name=name)[0]

    @classmethod
    def get_all_section_types(cls):
        return SectionType.objects.all()

    @classmethod
    def get_all_section_types_list(cls):
        # (name, name) pairs, e.g. suitable as Django form choices.
        return tuple((section_type.name, section_type.name)
                     for section_type in SectionType.objects.all())
# WorkInfo contains the user defined information for specific Course-SectionType pairs
# Each pair has an associated work units and work hours defined by the department
class WorkInfo(models.Model):
    """Department-defined work units/hours for a (course, section type) pair."""
    class Meta:
        unique_together = (("course", "section_type"),)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    section_type = models.ForeignKey(SectionType, on_delete=models.CASCADE)
    work_units = models.IntegerField(default=0)
    work_hours = models.IntegerField(default=0)

    # NOTE(review): instance method despite the old "#classmethod?" marker —
    # it reads self fields, so an instance method is correct here.
    def getJSON(self):
        return JsonResponse({
            'course_name': self.course.name,
            'section_type_name': self.section_type.name,
            'work_units': self.work_units,
            'work_hours': self.work_hours
        })

    @classmethod
    def create(cls, course, section_type, work_units, work_hours):
        # Create and persist the association row.
        work_info = cls(course=course, section_type=section_type,
                        work_units=work_units, work_hours=work_hours)
        work_info.save()
        return work_info
class Availability(models.Model):
    """A faculty member's availability window on a recurring day pattern."""
    class Meta:
        unique_together = (("faculty", "days_of_week", "start_time"),)
    # NOTE(review): OneToOneField already limits each faculty to one row,
    # which makes the unique_together above redundant — a ForeignKey was
    # probably intended; confirm before relying on multiple windows per
    # faculty member.
    faculty = models.OneToOneField(CUser, on_delete=models.CASCADE, null=True)
    days_of_week = models.CharField(max_length=16)  # MWF or TR
    start_time = models.TimeField()
    start_type = models.CharField(max_length=2, default="AM")
    end_time = models.TimeField()
    end_type = models.CharField(max_length=2, default="AM")
    level = models.CharField(max_length=16)  # available, preferred, unavailable

    @classmethod
    def create(cls, email, days, start, s_type, end, e_type, level):
        """Validate and persist an availability window for a faculty email.

        Raises ValidationError on any missing/malformed field; propagates
        ObjectDoesNotExist if the email is not a faculty user.
        """
        faculty = CUser.get_faculty(email=email)
        if days is None or len(days) > 16 or (days != "MWF" and days != "TR"):
            raise ValidationError("Invalid days of week input")
        elif (start is None):
            raise ValidationError("Need to input start time")
        elif (s_type is None):
            raise ValidationError("Need to input start type")
        elif (end is None):
            raise ValidationError("Need to input end time")
        elif (e_type is None):
            raise ValidationError("Need to input end type")
        elif (level is None) or (level != "available" and level != "preferred" and level != "unavailable"):
            raise ValidationError("Need to input level of availability: preferred, available, or unavailable")
        else:
            availability = cls(faculty=faculty, days_of_week=days, start_time=start, start_type=s_type, end_time=end, end_type=e_type, level=level)
            availability.save()
            return availability
# ---------- Scheduling Models ----------
# Schedule is a container for scheduled sections and correponds to exactly 1 academic term
class Schedule(models.Model):
    """Container for scheduled sections; one row per academic term."""
    academic_term = models.CharField(max_length=16, unique=True)  # eg. "Fall 2016"
    state = models.CharField(max_length=16, default="active")  # eg. active or finalized

    def finalize_schedule(self):
        # NOTE(review): mutates in memory only — no save(); callers must
        # persist the change themselves. Confirm that is intentional.
        self.state = "finalized"

    def return_to_active(self):
        # NOTE(review): same as finalize_schedule — not persisted here.
        self.state = "active"

    @classmethod
    def create(cls, academic_term, state):
        """Validate the state value, then create and save the schedule."""
        if state != "finalized" and state != "active":
            raise ValidationError("Invalid schedule state.")
        else:
            schedule = cls(academic_term=academic_term, state=state)
            schedule.save()
            return schedule

    @classmethod
    def get_schedule(cls, term_name):
        # Throws ObjectDoesNotExist if the term has no schedule.
        return cls.objects.get(academic_term=term_name)

    @classmethod
    def get_all_schedules(cls):
        return cls.objects.filter();

    def to_json(self):
        return dict(
            academic_term = self.academic_term)
# Section is our systems primary scheduled object
# Each section represents a department section that is planned for a particular schedule
class Section(models.Model):
    """Primary scheduled object: one planned section of a course in a schedule."""
    schedule = models.ForeignKey(Schedule, on_delete=models.CASCADE)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    section_type = models.ForeignKey(SectionType, null=True, on_delete=models.SET_NULL)
    start_time = models.TimeField()
    end_time = models.TimeField()
    days = models.CharField(max_length=8)  # MWF or TR
    faculty = models.ForeignKey(CUser, null=True, on_delete=models.SET_NULL)
    room = models.ForeignKey(Room, null=True, on_delete=models.SET_NULL)
    capacity = models.IntegerField(default=0)
    students_enrolled = models.IntegerField(default=0)
    students_waitlisted = models.IntegerField(default=0)
    conflict = models.CharField(max_length=1, default='n')  # y or n
    conflict_reason = models.CharField(max_length=8, null=True)  # faculty or room
    fault = models.CharField(max_length=1, default='n')  # y or n
    fault_reason = models.CharField(max_length=8, null=True)  # faculty or room

    @classmethod
    def create(
            cls, term_name, course_name, section_type, start_time, end_time, days, faculty_email, room_name,
            capacity, students_enrolled, students_waitlisted, conflict,
            conflict_reason, fault, fault_reason):
        """Validate all inputs, resolve names to rows, persist and return a Section.

        Raises ValidationError for any out-of-range field and ObjectDoesNotExist
        when a referenced schedule/course/section type/faculty/room is unknown.
        """
        # Resolve human-readable names to model rows; these feed the ForeignKeys.
        schedule = Schedule.get_schedule(term_name)
        course = Course.get_course(course_name)
        section_type = SectionType.get_section_type(section_type)
        faculty = CUser.get_faculty(faculty_email)
        room = Room.get_room(room_name)
        if DEPARTMENT_SETTINGS.start_time and start_time < DEPARTMENT_SETTINGS.start_time:
            raise ValidationError("Invalid start time for department.")
        # Parenthesized for clarity: an end before the start is always invalid.
        if (DEPARTMENT_SETTINGS.end_time and end_time > DEPARTMENT_SETTINGS.end_time) or end_time < start_time:
            raise ValidationError("Invalid end time for department.")
        if days != "MWF" and days != "TR":
            raise ValidationError("Invalid days of the week.")
        if capacity < 0:
            raise ValidationError("Invalid section capacity.")
        if students_enrolled < 0:
            raise ValidationError("Invalid number of enrolled students.")
        if students_waitlisted < 0:
            raise ValidationError("Invalid number of students waitlisted.")
        if conflict != 'y' and conflict != 'n':
            raise ValidationError("Invalid value for conflict.")
        if conflict == 'y' and conflict_reason != "faculty" and conflict_reason != "room":
            raise ValidationError("Invalid conflict reason.")
        if fault != 'y' and fault != 'n':
            raise ValidationError("Invalid value for fault.")
        if fault == 'y' and fault_reason != "faculty" and fault_reason != "room":
            raise ValidationError("Invalid fault reason.")
        section = cls(
            schedule=schedule,
            course=course,
            section_type=section_type,
            start_time=start_time,
            end_time=end_time,
            days=days,
            faculty=faculty,
            room=room,
            capacity=capacity,
            students_enrolled=students_enrolled,
            students_waitlisted=students_waitlisted,
            conflict=conflict,
            conflict_reason=conflict_reason,
            fault=fault,
            fault_reason=fault_reason)
        section.save()
        return section

    @classmethod
    def get_section(cls, **kwargs):
        """Look up a single section by one keyword filter.

        Known keys (schedule/course/faculty/room) are resolved from their
        human-readable names to model rows first; any other key is passed
        straight through to .get().
        """
        for k, v in kwargs.iteritems():
            if k == 'schedule':
                return cls.objects.get(schedule=Schedule.get_schedule(v))
            elif k == 'course':
                return cls.objects.get(course=Course.get_course(v))
            elif k == 'faculty':
                return cls.objects.get(faculty=CUser.get_faculty(v))
            elif k == 'room':
                return cls.objects.get(room=Room.get_room(v))
            else:
                # BUG FIX: the original used .get(k=v), which queries a literal
                # field named "k"; unpack so the caller's keyword is used.
                return cls.objects.get(**{k: v})

    # Takes a dictionary of filters deserialized from the UI's JSON selection.
    # Time filters arrive as {"MWF"/"TR": [[start, end], ...]} chunks so there
    # is always at least a start and an end per chunk; every other filter key
    # maps to {'logic': ..., 'filters': [...]} and is passed through to the ORM.
    @classmethod
    def filter_json(cls, json_string):
        """Deserialize a JSON filter description and delegate to filter()."""
        return cls.filter(json.loads(json_string))

    @classmethod
    def filter(cls, filter_dict):
        """Build a Q expression from the filter dictionary and run it."""
        andList = []
        ands = False
        orList = []
        ors = False
        timeList = []
        timeLogic = ''
        andQuery = ''
        orQuery = ''
        timeQuery = ''
        finalQuery = ''
        for key, tags in filter_dict.iteritems():
            if 'logic' not in tags or 'filters' not in tags:
                raise ValidationError("JSON not set up correctly. 'logic' and 'filters' are required keys in each filter type.")
            logic = tags['logic']
            filters = tags['filters']
            if key == "time":
                for k, v in filters.iteritems():
                    timeLogic = logic
                    if k == "MWF" or k == "TR":
                        for times in range(len(v)):
                            # Each chunk matches sections lying fully inside [start, end].
                            timeList += [reduce(operator.and_, [Q(days=k), Q(start_time__gte=v[times][0]), Q(end_time__lte=v[times][1])])]
                if timeList:
                    timeQuery = reduce(operator.or_, timeList)
            else:
                queryLoop = Q()
                for index in range(len(filters)):
                    if key == "course":
                        filterObject = Course.get_course(filters[index])
                        queryLoop = reduce(operator.or_, [queryLoop, Q(course=filterObject)])
                    elif key == "faculty":
                        filterObject = CUser.get_faculty(filters[index])
                        queryLoop = reduce(operator.or_, [queryLoop, Q(faculty=filterObject)])
                    elif key == "room":
                        filterObject = Room.get_room(filters[index])
                        queryLoop = reduce(operator.or_, [queryLoop, Q(room=filterObject)])
                    else:
                        raise ValidationError("Invalid filter type.")
                if 'or' in logic:
                    ors = True
                    orList += [queryLoop]
                # BUG FIX: `logic is ''` relied on string-interning identity; use ==.
                elif 'and' in logic or logic == '':
                    ands = True
                    andList += [queryLoop]
        if ands is True:
            andQuery = reduce(operator.and_, andList)
            # BUG FIX: timeQuery starts as '' and is never None, so the old
            # `is not None` test always passed and could AND a Q object with ''.
            if timeQuery != '' and ('and' in timeLogic):
                andQuery = reduce(operator.and_, [andQuery, timeQuery])
            finalQuery = andQuery
        if ors is True:
            # NOTE(review): groups flagged 'or' are combined with AND here
            # (each group is already OR'd internally) -- presumably intended;
            # confirm against the UI's semantics.
            orQuery = reduce(operator.and_, orList)
            if timeQuery != '' and ('or' in timeLogic):
                orQuery = reduce(operator.or_, [orQuery, timeQuery])
            if finalQuery != '':
                finalQuery = reduce(operator.or_, [finalQuery, orQuery])
            else:
                finalQuery = orQuery
        if finalQuery == '':
            finalQuery = timeQuery
        if finalQuery == '':
            # ROBUSTNESS: no usable filters were supplied; avoid .filter('').
            return Section.objects.all()
        return Section.objects.filter(finalQuery)
class FacultyCoursePreferences(models.Model):
    """A faculty member's ranked preference for teaching a course."""
    faculty = models.ForeignKey(CUser, on_delete = models.CASCADE)
    course = models.ForeignKey(Course, on_delete = models.CASCADE)
    comments = models.CharField(max_length=2048, null=True, default="No comments.")
    rank = models.IntegerField(default = 0)

    @classmethod
    def create(cls, faculty, course, comments, rank):
        """Persist and return a new preference entry."""
        pref = cls(faculty=faculty, course=course, comments=comments, rank=rank)
        pref.save()
        return pref

    @classmethod
    def get_faculty_pref(cls, faculty):
        """Return the raw queryset of this faculty member's preference rows."""
        return cls.objects.filter(faculty=faculty)

    @classmethod
    def get_course_list(cls, faculty):
        """Return (rank, course_name, course_description, comments) tuples,
        sorted ascending by rank."""
        rows = [(entry.rank, entry.course.name, entry.course.description, entry.comments)
                for entry in cls.objects.filter(faculty=faculty)]
        rows.sort(key=lambda tup: tup[0])
        return rows
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.conf import settings
import MySQLdb
import re
from django.db import IntegrityError
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from util import DepartmentSettings
from settings import DEPARTMENT_SETTINGS
import json
import operator
from django.db.models import Q
from django.http import JsonResponse
# System User class,
# Wrapper for django builtin class, contains user + application specific data
class CUser(models.Model):
    """System user: wraps Django's built-in User with application-specific data."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    user_type = models.CharField(max_length=16)  # 'scheduler' or 'faculty'

    @staticmethod
    def validate_email(email):
        """Return email unchanged if well-formed, else raise ValidationError."""
        if re.match(r'^[A-Za-z0-9\._%+\-]+@[A-Za-z0-9\.\-]+\.[A-Za-z]{2,}$', email) is None:
            # BUG FIX: the concatenated message was missing the space
            # between "creation" and "with".
            raise ValidationError("Attempted CUser creation with invalid email address")
        return email

    # Password must be 8-32 chars with at least 1 letter, 1 digit and 1 special char.
    @staticmethod
    def validate_password(password):
        if re.match(r'^(?=.*\d)(?=.*[A-Za-z])(?=.*[-._!@#$%^&*?+])[A-Za-z0-9-._!@#$%^&*?+]{8,32}$', password) is None:
            raise ValidationError("Attempted CUser creation with invalid password")
        return password

    @staticmethod
    def validate_user_type(user_type):
        """Only 'scheduler' and 'faculty' are valid user types."""
        if user_type != 'scheduler' and user_type != 'faculty':
            raise ValidationError("Attempted CUser creation with invalid user_type")
        return user_type

    @staticmethod
    def validate_first_name(first_name):
        if first_name and len(first_name) > 30:
            raise ValidationError("Attempted CUser creation with a first_name longer than 30 characters")
        return first_name

    @staticmethod
    def validate_last_name(last_name):
        if last_name and len(last_name) > 30:
            raise ValidationError("Attempted CUser creation with a last_name longer than 30 characters")
        return last_name

    @classmethod
    def validate_name(cls, first_name, last_name):
        """Validate name lengths and reject duplicate full names."""
        if first_name and len(first_name) > 30:
            raise ValidationError("Attempted CUser creation with a first_name longer than 30 characters")
        if last_name and len(last_name) > 30:
            raise ValidationError("Attempted CUser creation with a last_name longer than 30 characters")
        if CUser.objects.filter(user__first_name=first_name, user__last_name=last_name).exists():
            raise ValidationError("Attempted CUser creation with duplicate full name.")

    @classmethod
    def create(cls, email, password, user_type, first_name, last_name):
        """Validate all fields, create the wrapped Django User, save and return.

        Faculty users also get an associated FacultyDetails row with target
        work hours/units initially 0.
        """
        cls.validate_name(first_name, last_name)
        user = cls(user=User.objects.create_user(username=cls.validate_email(email),
                                                 email=cls.validate_email(email),
                                                 password=cls.validate_password(password),
                                                 first_name=cls.validate_first_name(first_name),
                                                 last_name=cls.validate_last_name(last_name)),
                   user_type=cls.validate_user_type(user_type))
        user.save()
        if user_type == 'faculty':
            FacultyDetails.create(user, 0, 0).save()
        return user

    # Return cuser by email
    @classmethod
    def get_user(cls, email):  # Throws ObjectDoesNotExist
        return cls.objects.get(user__username=email)

    # Return cuser by full name ("First Last")
    @classmethod
    def get_cuser_by_full_name(cls, full_name):
        # BUG FIX: this method was missing @classmethod (so `cls` was bound to
        # the first positional argument) and left a debug print in place.
        first_name = full_name.split()[0]
        last_name = full_name.split()[1]
        return cls.objects.get(user__first_name=first_name,
                               user__last_name=last_name)

    # Return faculty cuser by email
    @classmethod
    def get_faculty(cls, email):  # Throws ObjectDoesNotExist
        return cls.objects.get(user__username=email, user_type='faculty')

    # Return all faculty cusers
    @classmethod
    def get_all_faculty(cls):
        return cls.objects.filter(user_type='faculty')

    # Return every faculty member's "First Last" display name
    @classmethod
    def get_all_faculty_full_name(cls):
        faculty_list = cls.objects.filter(user_type='faculty')
        names_list = []
        for faculty in faculty_list:
            names_list.append('{0} {1}'.format(faculty.user.first_name, faculty.user.last_name))
        return names_list

    # Return scheduler cuser by email
    @classmethod
    def get_scheduler(cls, email):  # Throws ObjectDoesNotExist
        return cls.objects.get(user__username=email, user_type='scheduler')

    # Return all scheduler cusers
    @classmethod
    def get_all_schedulers(cls):
        return cls.objects.filter(user_type='scheduler')

    def get_email(self):
        """Return the login email (username) of this user.

        BUG FIX: was wrongly decorated @classmethod while reading self.user.
        """
        return self.user.username

    def set_first_name(self, first_name):
        # BUG FIX: was a @classmethod assigning a nonexistent CUser.first_name;
        # the name lives on the wrapped Django User.
        self.user.first_name = first_name
        self.user.save()

    def set_last_name(self, last_name):
        # BUG FIX: same as set_first_name -- update the wrapped User.
        self.user.last_name = last_name
        self.user.save()

    def set_password(self, pword):
        # BUG FIX: use Django's set_password so the password is hashed,
        # instead of assigning a raw attribute that was never persisted.
        self.user.set_password(pword)
        self.user.save()

    def to_json(self):
        """Return a JSON-serializable dict for this user."""
        return dict(id = self.id,
                    # BUG FIX: separate first and last name with a space.
                    name = self.user.first_name + ' ' + self.user.last_name,
                    email = self.user.email)
class FacultyDetails(models.Model):
    """Per-faculty scheduling targets, keyed 1:1 to the faculty CUser."""
    # The faculty field uses the CUser row as the key; deleting that CUser
    # cascades and deletes this row as well.
    faculty = models.OneToOneField(CUser, on_delete=models.CASCADE)
    target_work_units = models.IntegerField(default=0, null=True) # in units
    target_work_hours = models.IntegerField(default=0, null=True) # in hours
    changed_preferences = models.CharField(max_length=1) # 'y' or 'n'
    @classmethod
    def create(cls, faculty, target_work_units, target_work_hours):
        # New rows start with changed_preferences='n' (no edits made yet).
        faculty = cls(faculty=faculty, target_work_units=target_work_units,
                target_work_hours=target_work_hours, changed_preferences='n')
        faculty.save()
        return faculty
    def change_details(self, new_work_units=None, new_work_hours=None):
        # Updates the in-memory instance only and flags the change.
        # NOTE(review): nothing calls self.save() here -- confirm callers
        # persist the instance, otherwise these edits are lost.
        # Also note falsy values (0) are ignored by these truthiness checks.
        if new_work_units:
            self.target_work_units = new_work_units
        if new_work_hours:
            self.target_work_hours = new_work_hours
        self.changed_preferences = 'y'
    # @TODO Function to reset changed_preferences to 'n'? Also consider naming it something
    # more indicative -> preferences_have_changed? has_changed_preferences? etc.
# ---------- Resource Models ----------
# Room represents department rooms
class Room(models.Model):
    """A department room available for scheduling."""
    name = models.CharField(max_length=32, unique=True)
    description = models.CharField(max_length=256, null=True)
    capacity = models.IntegerField(default=0)
    notes = models.CharField(max_length=1024, null=True)
    equipment = models.CharField(max_length=1024, null=True)

    @classmethod
    def create(cls, name, description, capacity, notes, equipment):
        """Validate field lengths, then persist and return a new Room.

        Raises ValidationError when name is missing or any field exceeds its
        column length.
        """
        if name is None:
            raise ValidationError("Room name is required")
        elif len(name) > 32:
            raise ValidationError("Room name is longer than 32 characters")
        elif description and len(description) > 256:
            raise ValidationError("Room description is longer than 256 characters")
        elif notes and len(notes) > 1024:
            raise ValidationError("Room notes is longer than 1024 characters")
        # BUG FIX: the equipment column allows 1024 characters (and the error
        # message already said 1024), but the original checked against 256.
        elif equipment and len(equipment) > 1024:
            raise ValidationError("Room equipment is longer than 1024 characters")
        else:
            room = cls(name=name,
                       description=description,
                       capacity=capacity,
                       notes=notes,
                       equipment=equipment)
            room.save()
            return room

    @classmethod
    def get_room(cls, name):
        """Return the room with the given unique name (raises DoesNotExist)."""
        return Room.objects.get(name=name)

    @classmethod
    def get_all_rooms(cls):
        """Return a queryset of all rooms."""
        return cls.objects.filter()

    def to_json(self):
        """Return a JSON-serializable dict for this room."""
        return dict(id = self.id,
                    name = self.name,
                    description = self.description,
                    capacity = self.capacity,
                    notes = self.notes,
                    equipment = self.equipment)
# Course represents a department course offering
class Course(models.Model):
    """A department course offering."""
    name = models.CharField(max_length=16, unique=True)
    equipment_req = models.CharField(max_length=2048, null=True)
    description = models.CharField(max_length=2048, null=True)

    @classmethod
    def create(cls, name, equipment_req, description):
        """Validate lengths (optional fields may be None), persist and return."""
        if len(name) > 16:
            raise ValidationError("Name is longer than 16 characters, making it invalid.")
        # BUG FIX: both optional fields are nullable, so guard against None
        # before calling len(); the equipment message also wrongly said
        # "Description".
        if equipment_req and len(equipment_req) > 2048:
            raise ValidationError("Equipment required is longer than 2048 characters, making it invalid.")
        if description and len(description) > 2048:
            raise ValidationError("Description is longer than 2048 characters, making it invalid.")
        course = cls(name=name,
                     equipment_req=equipment_req,
                     description=description)
        course.save()
        return course

    # Returns all course objects
    @classmethod
    def get_all_courses(cls):
        return cls.objects.filter()

    # Returns course by name (raises DoesNotExist)
    @classmethod
    def get_course(cls, name):
        return cls.objects.get(name=name)

    def to_json(self):
        """Return a JSON-serializable dict for this course."""
        return dict(id = self.id,
                    name = self.name,
                    equipment_req = self.equipment_req,
                    description = self.description)

    # Set and persist the equipment required for this course
    def set_equipment_req(self, equipment_req):
        self.equipment_req = equipment_req
        self.save()

    # Set and persist the description of this course
    def set_description(self, description):
        self.description = description
        self.save()

    # Get all section types (WorkInfo pairings) associated with this course
    def get_all_section_types(self):
        return WorkInfo.objects.filter(course=self)

    # Get a specific section type associated with this course
    def get_section_type(self, section_type_name):  # Throws ObjectDoesNotExist, MultipleObjectsReturned
        section_type = SectionType.get_section_type(section_type_name)
        return WorkInfo.objects.get(course=self, section_type=section_type)

    # Associate a new section type (with its workload) with this course
    def add_section_type(self, section_type_name, work_units, work_hours):  # Throws ObjectDoesNotExist
        section_type = SectionType.get_section_type(section_type_name)
        WorkInfo.create(self, section_type, work_units, work_hours)

    # Remove association between section type and course
    def remove_section_type(self, section_type_name):  # Throws ObjectDoesNotExist
        self.get_section_type(section_type_name).delete()

    # Retrieve all section types for this course and format them as JSON
    def get_all_section_types_JSON(self):
        # CLEANUP: removed leftover debug prints from the original.
        courseSectionTypes = self.get_all_section_types()
        sectionTypesDictionary = {}
        i = 0
        for sectionType in courseSectionTypes:
            sectionTypesDictionary[i] = {
                'course_name': sectionType.course.name,
                'section_type_name': sectionType.section_type.name,
                'work_units': sectionType.work_units,
                'work_hours': sectionType.work_hours
            }
            i += 1
        return JsonResponse(sectionTypesDictionary)
class SectionType(models.Model):
    """A kind of section, e.g. lecture or lab."""
    name = models.CharField(max_length=32, unique=True)  # eg. lecture or lab

    @classmethod
    def create(cls, name):
        """Validate the name length, then persist and return a new SectionType."""
        if len(name) > 32:
            raise ValidationError("Section Type name exceeds 32 characters.")
        section_type = cls(name=name)
        section_type.save()
        return section_type

    @classmethod
    def get_section_type(cls, name):
        # CLEANUP: removed a leftover debug print("CHECK").
        # NOTE(review): filter()[0] (rather than .get) raises IndexError when
        # no row matches instead of DoesNotExist -- presumably deliberate to
        # tolerate duplicates; confirm before switching to .get().
        return cls.objects.filter(name=name)[0]

    @classmethod
    def get_all_section_types(cls):
        """Return a queryset of every section type."""
        return SectionType.objects.all()

    @classmethod
    def get_all_section_types_list(cls):
        """Return ((name, name), ...) choice tuples for every section type."""
        # Renamed the local so it no longer shadows the builtin `list`.
        choices = []
        for section_type in SectionType.objects.all():
            choices.append((section_type.name, section_type.name))
        return tuple(choices)
# WorkInfo contains the user defined information for specific Course-SectionType pairs
# Each pair has an associated work units and work hours defined by the department
class WorkInfo(models.Model):
    """Department-defined workload for one unique (course, section type) pair."""
    class Meta:
        unique_together = (("course", "section_type"),)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    section_type = models.ForeignKey(SectionType, on_delete=models.CASCADE)
    work_units = models.IntegerField(default=0)  # department-defined units
    work_hours = models.IntegerField(default=0)  # department-defined hours
    # Instance method (it reads self), answering the original "#classmethod?" note.
    def getJSON(self):
        # Serialize this pairing as a JsonResponse for the API layer.
        return JsonResponse({
            'course_name': self.course.name,
            'section_type_name': self.section_type.name,
            'work_units': self.work_units,
            'work_hours': self.work_hours
        })
    @classmethod
    def create(cls, course, section_type, work_units, work_hours):
        # Persist and return a new WorkInfo row.
        work_info = cls(course=course, section_type=section_type,
                work_units=work_units, work_hours=work_hours)
        work_info.save()
        return work_info
class Availability(models.Model):
    """A faculty member's availability window for one day-group (MWF or TR)."""
    class Meta:
        unique_together = (("faculty", "days_of_week", "start_time"),)
    # NOTE(review): OneToOneField permits only ONE Availability row per
    # faculty, which contradicts the unique_together above (it implies several
    # rows per faculty with different days/start times) -- presumably this
    # should be a ForeignKey; confirm before relying on multiple windows.
    faculty = models.OneToOneField(CUser, on_delete=models.CASCADE, null=True)
    days_of_week = models.CharField(max_length=16) # MWF or TR
    start_time = models.TimeField()
    start_type = models.CharField(max_length=2, default="AM")
    end_time = models.TimeField()
    end_type = models.CharField(max_length=2, default="AM")
    level = models.CharField(max_length=16) # available, preferred, unavailable
    @classmethod
    def create(cls, email, days, start, s_type, end, e_type, level):
        # Resolve the faculty user first; raises ObjectDoesNotExist if unknown.
        faculty = CUser.get_faculty(email=email)
        # Validate each input in turn; every failure raises ValidationError.
        if days is None or len(days) > 16 or (days != "MWF" and days != "TR"):
            raise ValidationError("Invalid days of week input")
        elif (start is None):
            raise ValidationError("Need to input start time")
        elif (s_type is None):
            raise ValidationError("Need to input start type")
        elif (end is None):
            raise ValidationError("Need to input end time")
        elif (e_type is None):
            raise ValidationError("Need to input end type")
        elif (level is None) or (level != "available" and level != "preferred" and level != "unavailable"):
            raise ValidationError("Need to input level of availability: preferred, available, or unavailable")
        else:
            availability = cls(faculty=faculty, days_of_week=days, start_time=start, start_type=s_type, end_time=end, end_type=e_type, level=level)
            availability.save()
            return availability
# ---------- Scheduling Models ----------
# Schedule is a container for scheduled sections and corresponds to exactly 1 academic term
class Schedule(models.Model):
    """Container for scheduled sections; corresponds to exactly one academic term."""
    academic_term = models.CharField(max_length=16, unique=True)  # eg. "Fall 2016"
    state = models.CharField(max_length=16, default="active")     # eg. active or finalized

    def finalize_schedule(self):
        # In-memory state change only.
        # NOTE(review): no self.save() here -- confirm callers persist,
        # otherwise the transition is lost.
        self.state = "finalized"

    def return_to_active(self):
        # In-memory state change only; see finalize_schedule.
        self.state = "active"

    @classmethod
    def create(cls, academic_term, state):
        """Validate the state, then persist and return a new Schedule.

        Raises ValidationError when state is neither 'finalized' nor 'active'.
        """
        if state != "finalized" and state != "active":
            raise ValidationError("Invalid schedule state.")
        else:
            schedule = cls(academic_term=academic_term, state=state)
            schedule.save()
            return schedule

    @classmethod
    def get_schedule(cls, term_name):
        """Return the schedule for the given academic term (raises DoesNotExist)."""
        return cls.objects.get(academic_term=term_name)

    @classmethod
    def get_all_schedules(cls):
        """Return a queryset of every schedule."""
        # Removed a stray trailing semicolon from the original.
        return cls.objects.filter()

    def to_json(self):
        """Return a JSON-serializable dict for this schedule."""
        return dict(
            academic_term = self.academic_term)
# Section is our systems primary scheduled object
# Each section represents a department section that is planned for a particular schedule
class Section(models.Model):
    """Primary scheduled object: one planned section of a course in a schedule."""
    schedule = models.ForeignKey(Schedule, on_delete=models.CASCADE)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    section_type = models.ForeignKey(SectionType, null=True, on_delete=models.SET_NULL)
    start_time = models.TimeField()
    end_time = models.TimeField()
    days = models.CharField(max_length=8)  # MWF or TR
    faculty = models.ForeignKey(CUser, null=True, on_delete=models.SET_NULL)
    room = models.ForeignKey(Room, null=True, on_delete=models.SET_NULL)
    capacity = models.IntegerField(default=0)
    students_enrolled = models.IntegerField(default=0)
    students_waitlisted = models.IntegerField(default=0)
    conflict = models.CharField(max_length=1, default='n')  # y or n
    conflict_reason = models.CharField(max_length=8, null=True)  # faculty or room
    fault = models.CharField(max_length=1, default='n')  # y or n
    fault_reason = models.CharField(max_length=8, null=True)  # faculty or room

    @classmethod
    def create(
            cls, term_name, course_name, section_type, start_time, end_time, days, faculty_email, room_name,
            capacity, students_enrolled, students_waitlisted, conflict,
            conflict_reason, fault, fault_reason):
        """Validate all inputs, resolve names to rows, persist and return a Section.

        Raises ValidationError for any out-of-range field and ObjectDoesNotExist
        when a referenced schedule/course/section type/faculty/room is unknown.
        """
        # Resolve human-readable names to model rows; these feed the ForeignKeys.
        schedule = Schedule.get_schedule(term_name)
        course = Course.get_course(course_name)
        section_type = SectionType.get_section_type(section_type)
        faculty = CUser.get_faculty(faculty_email)
        room = Room.get_room(room_name)
        if DEPARTMENT_SETTINGS.start_time and start_time < DEPARTMENT_SETTINGS.start_time:
            raise ValidationError("Invalid start time for department.")
        # Parenthesized for clarity: an end before the start is always invalid.
        if (DEPARTMENT_SETTINGS.end_time and end_time > DEPARTMENT_SETTINGS.end_time) or end_time < start_time:
            raise ValidationError("Invalid end time for department.")
        if days != "MWF" and days != "TR":
            raise ValidationError("Invalid days of the week.")
        if capacity < 0:
            raise ValidationError("Invalid section capacity.")
        if students_enrolled < 0:
            raise ValidationError("Invalid number of enrolled students.")
        if students_waitlisted < 0:
            raise ValidationError("Invalid number of students waitlisted.")
        if conflict != 'y' and conflict != 'n':
            raise ValidationError("Invalid value for conflict.")
        if conflict == 'y' and conflict_reason != "faculty" and conflict_reason != "room":
            raise ValidationError("Invalid conflict reason.")
        if fault != 'y' and fault != 'n':
            raise ValidationError("Invalid value for fault.")
        if fault == 'y' and fault_reason != "faculty" and fault_reason != "room":
            raise ValidationError("Invalid fault reason.")
        section = cls(
            schedule=schedule,
            course=course,
            section_type=section_type,
            start_time=start_time,
            end_time=end_time,
            days=days,
            faculty=faculty,
            room=room,
            capacity=capacity,
            students_enrolled=students_enrolled,
            students_waitlisted=students_waitlisted,
            conflict=conflict,
            conflict_reason=conflict_reason,
            fault=fault,
            fault_reason=fault_reason)
        section.save()
        return section

    @classmethod
    def get_section(cls, **kwargs):
        """Look up a single section by one keyword filter.

        Known keys (schedule/course/faculty/room) are resolved from their
        human-readable names to model rows first; any other key is passed
        straight through to .get().
        """
        for k, v in kwargs.iteritems():
            if k == 'schedule':
                return cls.objects.get(schedule=Schedule.get_schedule(v))
            elif k == 'course':
                return cls.objects.get(course=Course.get_course(v))
            elif k == 'faculty':
                return cls.objects.get(faculty=CUser.get_faculty(v))
            elif k == 'room':
                return cls.objects.get(room=Room.get_room(v))
            else:
                # BUG FIX: the original used .get(k=v), which queries a literal
                # field named "k"; unpack so the caller's keyword is used.
                return cls.objects.get(**{k: v})

    # Takes a dictionary of filters deserialized from the UI's JSON selection.
    # Time filters arrive as {"MWF"/"TR": [[start, end], ...]} chunks so there
    # is always at least a start and an end per chunk; every other filter key
    # maps to {'logic': ..., 'filters': [...]} and is passed through to the ORM.
    @classmethod
    def filter_json(cls, json_string):
        """Deserialize a JSON filter description and delegate to filter()."""
        return cls.filter(json.loads(json_string))

    @classmethod
    def filter(cls, filter_dict):
        """Build a Q expression from the filter dictionary and run it."""
        andList = []
        ands = False
        orList = []
        ors = False
        timeList = []
        timeLogic = ''
        andQuery = ''
        orQuery = ''
        timeQuery = ''
        finalQuery = ''
        for key, tags in filter_dict.iteritems():
            if 'logic' not in tags or 'filters' not in tags:
                raise ValidationError("JSON not set up correctly. 'logic' and 'filters' are required keys in each filter type.")
            logic = tags['logic']
            filters = tags['filters']
            if key == "time":
                for k, v in filters.iteritems():
                    timeLogic = logic
                    if k == "MWF" or k == "TR":
                        for times in range(len(v)):
                            # Each chunk matches sections lying fully inside [start, end].
                            timeList += [reduce(operator.and_, [Q(days=k), Q(start_time__gte=v[times][0]), Q(end_time__lte=v[times][1])])]
                if timeList:
                    timeQuery = reduce(operator.or_, timeList)
            else:
                queryLoop = Q()
                for index in range(len(filters)):
                    if key == "course":
                        filterObject = Course.get_course(filters[index])
                        queryLoop = reduce(operator.or_, [queryLoop, Q(course=filterObject)])
                    elif key == "faculty":
                        filterObject = CUser.get_faculty(filters[index])
                        queryLoop = reduce(operator.or_, [queryLoop, Q(faculty=filterObject)])
                    elif key == "room":
                        filterObject = Room.get_room(filters[index])
                        queryLoop = reduce(operator.or_, [queryLoop, Q(room=filterObject)])
                    else:
                        raise ValidationError("Invalid filter type.")
                if 'or' in logic:
                    ors = True
                    orList += [queryLoop]
                # BUG FIX: `logic is ''` relied on string-interning identity; use ==.
                elif 'and' in logic or logic == '':
                    ands = True
                    andList += [queryLoop]
        if ands is True:
            andQuery = reduce(operator.and_, andList)
            # BUG FIX: timeQuery starts as '' and is never None, so the old
            # `is not None` test always passed and could AND a Q object with ''.
            if timeQuery != '' and ('and' in timeLogic):
                andQuery = reduce(operator.and_, [andQuery, timeQuery])
            finalQuery = andQuery
        if ors is True:
            # NOTE(review): groups flagged 'or' are combined with AND here
            # (each group is already OR'd internally) -- presumably intended;
            # confirm against the UI's semantics.
            orQuery = reduce(operator.and_, orList)
            if timeQuery != '' and ('or' in timeLogic):
                orQuery = reduce(operator.or_, [orQuery, timeQuery])
            if finalQuery != '':
                finalQuery = reduce(operator.or_, [finalQuery, orQuery])
            else:
                finalQuery = orQuery
        if finalQuery == '':
            finalQuery = timeQuery
        if finalQuery == '':
            # ROBUSTNESS: no usable filters were supplied; avoid .filter('').
            return Section.objects.all()
        return Section.objects.filter(finalQuery)
class FacultyCoursePreferences(models.Model):
    """A faculty member's ranked preference for teaching a course."""
    faculty = models.ForeignKey(CUser, on_delete = models.CASCADE)
    course = models.ForeignKey(Course, on_delete = models.CASCADE)
    comments = models.CharField(max_length=2048, null=True, default="No comments.")
    rank = models.IntegerField(default = 0)

    @classmethod
    def create(cls, faculty, course, comments, rank):
        """Persist and return a new preference entry."""
        pref = cls(faculty=faculty, course=course, comments=comments, rank=rank)
        pref.save()
        return pref

    @classmethod
    def get_faculty_pref(cls, faculty):
        """Return the raw queryset of this faculty member's preference rows."""
        return cls.objects.filter(faculty=faculty)

    @classmethod
    def get_course_list(cls, faculty):
        """Return (rank, course_name, course_description, comments) tuples,
        sorted ascending by rank."""
        rows = [(entry.rank, entry.course.name, entry.course.description, entry.comments)
                for entry in cls.objects.filter(faculty=faculty)]
        rows.sort(key=lambda tup: tup[0])
        return rows
|
en
| 0.852421
|
# System User class, # Wrapper for django builtin class, contains user + application specific data # Password must: # be 8-32 chars, have: 1 alphachar, 1 digit, 1 specialchar #$%^&*?+])[A-Za-z0-9-._!@#$%^&*?+]{8,32}$', password) is None: # If user is faculty, create an associated faculty details # Target work hours and units are initially 0 # Return cuser by email # Throws ObjectDoesNotExist # Return cuser by full name # Return faculty cuser by email # Throws ObjectDoesNotExist # Return all faculty cusers # Return faculty full name # Return scheduler cuser by email # Throws ObjectDoesNotExist # Return all scheduler cusers # Return cuser email # Set the first name # Set the last name # Set the password # The user_id uses the User ID as a primary key. # Whenever this User is deleted, this entry in the table will also be deleted # in units # in hours # 'y' or 'n' # @TODO Function to yes changed_preferences to 'n'? Also consider naming it something # more indicative -> preferences_have_changed? has_changed_preferences? etc. # ---------- Resource Models ---------- # Room represents department rooms # Course represents a department course offering # Returns all course objects # Returns course by name # Set the equipment required for this course # Set the description of this course # Get all section types associated with this course # Get a specific section type associated with this course # Throws ObjectDoesNotExist, MultipleObjectsReturned # Associate a new section type with this course # Throws ObjectDoesNotExist # Remove association between section type and course # Throws ObjectDoesNotExist #section_type = SectionType.get_section_type(section_type_name) #WorkInfo.create(self, section_type, work_units, work_hours) # Retrieve all section types for this course and format them as JSON # eg. 
lecture or lab #return cls.objects.get(name=name) # WorkInfo contains the user defined information for specific Course-SectionType pairs # Each pair has an associated work units and work hours defined by the department #classmethod? # MWF or TR # available, preferred, unavailable # ---------- Scheduling Models ---------- # Schedule is a container for scheduled sections and correponds to exactly 1 academic term # eg. "Fall 2016" # eg. active or finalized # Section is our systems primary scheduled object # Each section represents a department section that is planned for a particular schedule # MWF or TR # y or n # faculty or room # y or n # faculty or room # these objects will actually be passed into the Section because of the ForeignKey # this function takes in a dictionary object of filters that has been serialized from a JSON object based on what the user has selected # for filtering by time, it will only take in an array of pairs (an array of 2-piece arrays) so that it will at least have a start time and end time. #### there can also be chunks of time, so there are multiple start and end times # for any other filter, we will pass on the keyword and array argument as it is to the filter. # join the course ID to the course table # go through and make list of tuples (rank, course_name, course_description, comments) # sort courses by rank (first spot in tuple)
| 2.151053
| 2
|
piece.py
|
fdusek/Checkers
| 0
|
6625947
|
<gh_stars>0
from enums import *
class Piece(object):
    """A checkers piece; its behavior is fully determined by its Piece_type."""

    def __init__(self, piece_type):
        # Reject anything that is not a Piece_type enum member.
        if not isinstance(piece_type, Piece_type):
            raise ValueError("Piece type has to be instance of 'class Piece_type (Enum)' it is " + str(piece_type))
        self.ptype = piece_type

    def promoted(self):
        """Return True when this piece has been promoted (kinged)."""
        return self.ptype in (Piece_type.black_promoted, Piece_type.white_promoted)

    def vectors(self):
        """Return the (dx, dy) move directions available to this piece."""
        if self.promoted():
            # Promoted pieces move along every diagonal.
            return [(1, 1), (-1, 1), (1, -1), (-1, -1)]
        if self.ptype == Piece_type.black:
            # Black men move up the board diagonally.
            return [(1, 1), (-1, 1)]
        if self.ptype == Piece_type.white:
            # White men move down the board diagonally.
            return [(1, -1), (-1, -1)]
        raise ValueError("This piece does not have move vectors defined for its type. Piece type = " + str(self.ptype))

    def __eq__(self, other):
        return other is not None and self.ptype == other.ptype

    def faction(self):
        """Return the Faction_type that owns this piece."""
        if self.ptype in (Piece_type.black, Piece_type.black_promoted):
            return Faction_type.black
        if self.ptype in (Piece_type.white, Piece_type.white_promoted):
            return Faction_type.white
        raise ValueError(
            "This piece does not have a Faction_type. Did you forget to remove highlighted piece? Piece type is " + str(
                self.ptype))
|
from enums import *
class Piece(object):
    """A checkers piece; its behavior is fully determined by its Piece_type."""

    def __init__(self, piece_type):
        # Reject anything that is not a Piece_type enum member.
        if not isinstance(piece_type, Piece_type):
            raise ValueError("Piece type has to be instance of 'class Piece_type (Enum)' it is " + str(piece_type))
        self.ptype = piece_type

    def promoted(self):
        """Return True when this piece has been promoted (kinged)."""
        return self.ptype in (Piece_type.black_promoted, Piece_type.white_promoted)

    def vectors(self):
        """Return the (dx, dy) move directions available to this piece."""
        if self.promoted():
            # Promoted pieces move along every diagonal.
            return [(1, 1), (-1, 1), (1, -1), (-1, -1)]
        if self.ptype == Piece_type.black:
            # Black men move up the board diagonally.
            return [(1, 1), (-1, 1)]
        if self.ptype == Piece_type.white:
            # White men move down the board diagonally.
            return [(1, -1), (-1, -1)]
        raise ValueError("This piece does not have move vectors defined for its type. Piece type = " + str(self.ptype))

    def __eq__(self, other):
        return other is not None and self.ptype == other.ptype

    def faction(self):
        """Return the Faction_type that owns this piece."""
        if self.ptype in (Piece_type.black, Piece_type.black_promoted):
            return Faction_type.black
        if self.ptype in (Piece_type.white, Piece_type.white_promoted):
            return Faction_type.white
        raise ValueError(
            "This piece does not have a Faction_type. Did you forget to remove highlighted piece? Piece type is " + str(
                self.ptype))
|
en
| 0.816938
|
# this piece can move in all diagonal directions # this piece can move up diagonally # this piece can move down diagonally # print(self.ptype)
| 3.397829
| 3
|
setup.py
|
vd2org/runigma
| 1
|
6625948
|
# Copyright (C) 2016-2019 by Vd.
# Copyright (C) 2012 by <NAME>.
# This file is part of RuNigma, the RuNigma Machine.
# RuNigma is released under the MIT License (see LICENSE).
import setuptools
from os.path import join, dirname
import runigma
setuptools.setup(
name='runigma',
version=runigma.__version__,
author='Vd',
author_email='<EMAIL>',
url='https://github.com/vd2org/runigma',
license='MIT',
description='RuNigma is a fictional cypher machine inspired by World War 2''s Enigma Machines.',
long_description=open(join(dirname(__file__), 'README.md')).read(),
packages=['runigma', 'runigma.rotors', 'runigma.tests'],
scripts=['runigma/bin/runigma', 'runigma/bin/runigma-sheet'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: Other Audience',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Communications',
'Topic :: Security',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
# Copyright (C) 2016-2019 by Vd.
# Copyright (C) 2012 by <NAME>.
# This file is part of RuNigma, the RuNigma Machine.
# RuNigma is released under the MIT License (see LICENSE).
import setuptools
from os.path import join, dirname
import runigma
setuptools.setup(
name='runigma',
version=runigma.__version__,
author='Vd',
author_email='<EMAIL>',
url='https://github.com/vd2org/runigma',
license='MIT',
description='RuNigma is a fictional cypher machine inspired by World War 2''s Enigma Machines.',
long_description=open(join(dirname(__file__), 'README.md')).read(),
packages=['runigma', 'runigma.rotors', 'runigma.tests'],
scripts=['runigma/bin/runigma', 'runigma/bin/runigma-sheet'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: Other Audience',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Communications',
'Topic :: Security',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
en
| 0.865082
|
# Copyright (C) 2016-2019 by Vd. # Copyright (C) 2012 by <NAME>. # This file is part of RuNigma, the RuNigma Machine. # RuNigma is released under the MIT License (see LICENSE).
| 1.309097
| 1
|
dvc/cache/gdrive.py
|
Christoph-1/dvc
| 0
|
6625949
|
from .base import CloudCache
class GDriveCache(CloudCache):
DEFAULT_VERIFY = True
|
from .base import CloudCache
class GDriveCache(CloudCache):
DEFAULT_VERIFY = True
|
none
| 1
| 1.335187
| 1
|
|
e2e/scripts/st_arrow_table_styling.py
|
ChangHoon-Sung/streamlit
| 1
|
6625950
|
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
"""
import numpy as np
import pandas as pd
import streamlit as st
def color_negative_red(val):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
color = "red" if val < 0 else "black"
return "color: %s" % color
def highlight_max(data, color="yellow"):
"""highlight the maximum in a Series or DataFrame"""
attr = "background-color: {}".format(color)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data == data.max()
return [attr if v else "" for v in is_max]
else: # from .apply(axis=None)
is_max = data == data.max().max()
return pd.DataFrame(
np.where(is_max, attr, ""), index=data.index, columns=data.columns
)
# Create a table to be styled in various ways
np.random.seed(24)
df = pd.DataFrame({"A": np.linspace(1, 5, 5)})
df = pd.concat([df, pd.DataFrame(np.random.randn(5, 4), columns=list("BCDE"))], axis=1)
df.iloc[0, 2] = np.nan
# Unstyled
st._arrow_table(df)
# Custom formatting
st._arrow_table(df.style.format("{:.2%}"))
# Colors
st._arrow_table(
df.style.applymap(color_negative_red).apply(
highlight_max, color="darkorange", axis=0
)
)
# Add rows throws an exception when the dataframe has a styler
x = st._arrow_table(
df.style.set_properties(**{"background-color": "black", "color": "lawngreen"})
)
x._arrow_add_rows(
pd.DataFrame(np.random.randn(3, 5)).style.set_properties(
**{"background-color": "lawngreen", "color": "black"}
)
)
|
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
"""
import numpy as np
import pandas as pd
import streamlit as st
def color_negative_red(val):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
color = "red" if val < 0 else "black"
return "color: %s" % color
def highlight_max(data, color="yellow"):
"""highlight the maximum in a Series or DataFrame"""
attr = "background-color: {}".format(color)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data == data.max()
return [attr if v else "" for v in is_max]
else: # from .apply(axis=None)
is_max = data == data.max().max()
return pd.DataFrame(
np.where(is_max, attr, ""), index=data.index, columns=data.columns
)
# Create a table to be styled in various ways
np.random.seed(24)
df = pd.DataFrame({"A": np.linspace(1, 5, 5)})
df = pd.concat([df, pd.DataFrame(np.random.randn(5, 4), columns=list("BCDE"))], axis=1)
df.iloc[0, 2] = np.nan
# Unstyled
st._arrow_table(df)
# Custom formatting
st._arrow_table(df.style.format("{:.2%}"))
# Colors
st._arrow_table(
df.style.applymap(color_negative_red).apply(
highlight_max, color="darkorange", axis=0
)
)
# Add rows throws an exception when the dataframe has a styler
x = st._arrow_table(
df.style.set_properties(**{"background-color": "black", "color": "lawngreen"})
)
x._arrow_add_rows(
pd.DataFrame(np.random.randn(3, 5)).style.set_properties(
**{"background-color": "lawngreen", "color": "black"}
)
)
|
en
| 0.748949
|
# Copyright 2018-2022 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html Takes a scalar and returns a string with the css property `'color: red'` for negative strings, black otherwise. highlight the maximum in a Series or DataFrame # Series from .apply(axis=0) or axis=1 # from .apply(axis=None) # Create a table to be styled in various ways # Unstyled # Custom formatting # Colors # Add rows throws an exception when the dataframe has a styler
| 3.379432
| 3
|