gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""
Implements a client class to query the
`Deezer API <https://developers.deezer.com/api>`_
"""
from __future__ import annotations
from typing import Any
import requests
from deezer.exceptions import (
DeezerErrorResponse,
DeezerHTTPError,
DeezerUnknownResource,
)
from deezer.pagination import PaginatedList
from deezer.resources import (
Album,
Artist,
Chart,
Editorial,
Episode,
Genre,
Playlist,
Podcast,
Radio,
Resource,
Track,
User,
)
class Client:
    """
    A client to retrieve some basic infos about Deezer resources.

    Create a client instance with the given options. Options should
    be passed in to the constructor as kwargs.

        >>> import deezer
        >>> client = deezer.Client(app_id='foo', app_secret='bar')

    This client provides several methods to retrieve the content of most
    sorts of Deezer objects, based on their json structure.

    Headers can be forced by using the ``headers`` kwarg.
    For example, use ``Accept-Language`` header to force the output language.

        >>> import deezer
        >>> client = deezer.Client(headers={'Accept-Language': 'fr'})

    :param app_id: application ID.
    :param app_secret: application secret.
    :param access_token: user access token.
    :param headers: a dictionary of headers to be used.
    """

    # Maps the "type" field of API payloads to the resource class used to
    # wrap them; None means no dedicated class exists for that type.
    objects_types = {
        "album": Album,
        "artist": Artist,
        "chart": Chart,
        "editorial": Editorial,
        "episode": Episode,
        # 'folder': None, # need identification
        "genre": Genre,
        "playlist": Playlist,
        "podcast": Podcast,
        "radio": Radio,
        "search": None,
        "track": Track,
        "user": User,
    }

    # Root URL of the Deezer API; request paths are appended to it.
    base_url = "https://api.deezer.com"

    def __init__(
        self, app_id=None, app_secret=None, access_token=None, headers=None, **kwargs
    ):
        # NOTE(review): extra **kwargs are accepted but never used here —
        # presumably kept for backward compatibility.
        self.app_id = app_id
        self.app_secret = app_secret
        self.access_token = access_token
        # One requests session is reused for every API call.
        self.session = requests.Session()
        headers = headers or {}
        self.session.headers.update(headers)

    def _process_json(
        self,
        item: dict[str, Any],
        parent: Resource | None = None,
        resource_type: type[Resource] | None = None,
        paginate_list: bool = False,
    ):
        """
        Recursively convert a dictionary
        to a :class:`~deezer.resources.Resource` object.

        :param item: the JSON response as dict.
        :param parent: A reference to the parent resource, to avoid fetching again.
        :param resource_type: The resource class to use as top level.
        :param paginate_list: Whether to wrap list into a pagination object.
        :returns: instance of :class:`~deezer.resources.Resource`, or a list
            of them, or the original dict when ``paginate_list`` is true.
        """
        # A "data" key marks a list payload: parse each entry, then either
        # return the plain list or keep the envelope for pagination metadata.
        if "data" in item:
            parsed_data = [
                self._process_json(i, parent, paginate_list=False) for i in item["data"]
            ]
            if not paginate_list:
                return parsed_data
            item["data"] = parsed_data
            return item
        result = {}
        for key, value in item.items():
            # Nested resource/list payloads are converted recursively too.
            if isinstance(value, dict) and ("type" in value or "data" in value):
                value = self._process_json(value, parent)
            result[key] = value
        if parent is not None:
            # Attach the already-known parent instead of re-fetching it.
            result[parent.type] = parent

        # Resolve the resource class: "type" field first, then the
        # caller-provided fallback, else fail loudly.
        if "type" in result:
            if result["type"] in self.objects_types:
                object_class = self.objects_types[result["type"]]
            else:
                # in case any new types are introduced by the API
                object_class = Resource
        elif resource_type:
            object_class = resource_type
        else:
            raise DeezerUnknownResource(f"Unable to find resource type for {result!r}")
        return object_class(self, result)

    def request(
        self,
        method: str,
        path: str,
        parent: Resource | None = None,
        resource_type: type[Resource] | None = None,
        paginate_list: bool = False,
        **params,
    ):
        """
        Make a request to the API and parse the response.

        :param method: HTTP verb to use: GET, POST, DELETE, ...
        :param path: The path to make the API call to (e.g. 'artist/1234').
        :param parent: A reference to the parent resource, to avoid fetching again.
        :param resource_type: The resource class to use as top level.
        :param paginate_list: Whether to wrap list into a pagination object.
        :param params: Query parameters to add to the request
        """
        if self.access_token is not None:
            # Authenticated calls carry the token as a query parameter.
            params["access_token"] = str(self.access_token)
        response = self.session.request(
            method,
            f"{self.base_url}/{path}",
            params=params,
        )
        try:
            response.raise_for_status()
        except requests.HTTPError as exc:
            # Translate transport-level failures to library exceptions.
            raise DeezerHTTPError.from_http_error(exc) from exc
        json_data = response.json()
        if not isinstance(json_data, dict):
            # Scalar responses (e.g. booleans from POST/DELETE) pass through.
            return json_data
        if "error" in json_data:
            # The API may report errors in the body of a successful response.
            raise DeezerErrorResponse(json_data)
        return self._process_json(
            json_data,
            parent=parent,
            resource_type=resource_type,
            paginate_list=paginate_list,
        )

    def _get_paginated_list(self, path, **params):
        # Lazily-evaluated, paginated wrapper around a list endpoint.
        return PaginatedList(client=self, base_path=path, **params)

    def get_album(self, album_id: int) -> Album:
        """
        Get the album with the given ID.

        :returns: an :class:`~deezer.resources.Album` object
        """
        return self.request("GET", f"album/{album_id}")

    def rate_album(self, album_id: int, note: int) -> bool:
        """
        Rate the album of the given ID with the given note.

        The note should be an integer between 1 and 5.

        :returns: boolean whether rating was applied
        """
        return self.request("POST", f"album/{album_id}", note=note)

    def get_artist(self, artist_id: int) -> Artist:
        """
        Get the artist with the given ID.

        :returns: an :class:`~deezer.resources.Artist` object
        """
        return self.request("GET", f"artist/{artist_id}")

    def get_chart(self) -> Chart:
        """
        Get overall charts for tracks, albums, artists and playlists.

        Combine charts of several resources in one endpoint.

        :returns: a :class:`~deezer.resources.Chart` instance.
        """
        return self.request("GET", "chart", resource_type=Chart)

    def get_tracks_chart(self) -> list[Track]:
        """
        Get top tracks.

        :return: a list of :class:`~deezer.resources.Track` instances.
        """
        return self.request("GET", "chart/0/tracks")

    def get_albums_chart(self) -> list[Album]:
        """
        Get top albums.

        :return: a list of :class:`~deezer.resources.Album` instances.
        """
        return self.request("GET", "chart/0/albums")

    def get_artists_chart(self) -> list[Artist]:
        """
        Get top artists.

        :return: a list of :class:`~deezer.resources.Artist` instances.
        """
        return self.request("GET", "chart/0/artists")

    def get_playlists_chart(self) -> list[Playlist]:
        """
        Get top playlists.

        :return: a list of :class:`~deezer.resources.Playlist` instances.
        """
        return self.request("GET", "chart/0/playlists")

    def get_podcasts_chart(self) -> list[Podcast]:
        """
        Get top podcasts.

        :return: a list of :class:`~deezer.resources.Podcast` instances.
        """
        return self.request("GET", "chart/0/podcasts")

    def get_editorial(self, editorial_id: int) -> Editorial:
        """
        Get the editorial with the given ID.

        :returns: a :class:`~deezer.resources.Editorial` object.
        """
        return self.request("GET", f"editorial/{editorial_id}")

    def list_editorials(self) -> PaginatedList[Editorial]:
        """
        List editorials.

        :returns: a :class:`~deezer.pagination.PaginatedList`
                  of :class:`~deezer.resources.Editorial` objects.
        """
        return self._get_paginated_list("editorial")

    def get_episode(self, episode_id: int) -> Episode:
        """
        Get the episode with the given ID.

        :returns: a :class:`~deezer.resources.Episode` object
        """
        return self.request("GET", f"episode/{episode_id}")

    def get_genre(self, genre_id: int) -> Genre:
        """
        Get the genre with the given ID.

        :returns: a :class:`~deezer.resources.Genre` object
        """
        return self.request("GET", f"genre/{genre_id}")

    def list_genres(self) -> list[Genre]:
        """
        List musical genres.

        :return: a list of :class:`~deezer.resources.Genre` instances
        """
        return self.request("GET", "genre")

    def get_playlist(self, playlist_id: int) -> Playlist:
        """
        Get the playlist with the given ID.

        :returns: a :class:`~deezer.resources.Playlist` object
        """
        return self.request("GET", f"playlist/{playlist_id}")

    def get_podcast(self, podcast_id: int) -> Podcast:
        """
        Get the podcast with the given ID.

        :returns: a :class:`~deezer.resources.Podcast` object
        """
        return self.request("GET", f"podcast/{podcast_id}")

    def get_radio(self, radio_id: int) -> Radio:
        """
        Get the radio with the given ID.

        :returns: a :class:`~deezer.resources.Radio` object
        """
        return self.request("GET", f"radio/{radio_id}")

    def list_radios(self) -> list[Radio]:
        """
        List radios.

        :return: a list of :class:`~deezer.resources.Radio` instances
        """
        return self.request("GET", "radio")

    def get_radios_top(self) -> PaginatedList[Radio]:
        """
        Get the top radios.

        :returns: a :class:`~deezer.pagination.PaginatedList`
                  of :class:`~deezer.resources.Radio` objects.
        """
        return self._get_paginated_list("radio/top")

    def get_track(self, track_id: int) -> Track:
        """
        Get the track with the given ID.

        :returns: a :class:`~deezer.resources.Track` object
        """
        return self.request("GET", f"track/{track_id}")

    def get_user(self, user_id: int | None = None) -> User:
        """
        Get the user with the given ID, or the current user when omitted.

        :returns: a :class:`~deezer.resources.User` object
        """
        user_id_str = str(user_id) if user_id else "me"
        return self.request("GET", f"user/{user_id_str}")

    def get_user_albums(self, user_id: int | None = None) -> PaginatedList[Album]:
        """
        Get the favourites albums for the given user_id if provided or current user if not.

        :param user_id: the user ID to get favourites albums.
        :return: a :class:`~deezer.pagination.PaginatedList`
                 of :class:`~deezer.resources.Album` instances.
        """
        user_id_str = str(user_id) if user_id else "me"
        return self._get_paginated_list(f"user/{user_id_str}/albums")

    def add_user_album(self, album_id: int) -> bool:
        """
        Add an album to the user's library.

        :param album_id: the ID of the album to add.
        :return: boolean whether the operation succeeded.
        """
        return self.request("POST", "user/me/albums", album_id=album_id)

    def remove_user_album(self, album_id: int) -> bool:
        """
        Remove an album from the user's library.

        :param album_id: the ID of the album to remove.
        :return: boolean whether the operation succeeded.
        """
        return self.request("DELETE", "user/me/albums", album_id=album_id)

    def get_user_artists(self, user_id: int | None = None) -> PaginatedList[Artist]:
        """
        Get the favourites artists for the given user_id if provided or current user if not.

        :param user_id: the user ID to get favourites artists.
        :return: a :class:`~deezer.pagination.PaginatedList`
                 of :class:`~deezer.resources.Artist` instances.
        """
        user_id_str = str(user_id) if user_id else "me"
        return self._get_paginated_list(f"user/{user_id_str}/artists")

    def add_user_artist(self, artist_id: int) -> bool:
        """
        Add an artist to the user's library.

        :param artist_id: the ID of the artist to add.
        :return: boolean whether the operation succeeded.
        """
        return self.request("POST", "user/me/artists", artist_id=artist_id)

    def remove_user_artist(self, artist_id: int) -> bool:
        """
        Remove an artist from the user's library.

        :param artist_id: the ID of the artist to remove.
        :return: boolean whether the operation succeeded.
        """
        return self.request("DELETE", "user/me/artists", artist_id=artist_id)

    def get_user_history(self) -> PaginatedList[Track]:
        """
        Returns a list of the recently played tracks for the current user.

        :return: a :class:`~deezer.pagination.PaginatedList`
                 of :class:`~deezer.resources.Track` instances.
        """
        return self._get_paginated_list("user/me/history")

    def get_user_tracks(self, user_id: int | None = None) -> PaginatedList[Track]:
        """
        Get the favourites tracks for the given user_id if provided or current user if not.

        :param user_id: the user ID to get favourites tracks.
        :return: a :class:`~deezer.pagination.PaginatedList`
                 of :class:`~deezer.resources.Track` instances.
        """
        user_id_str = str(user_id) if user_id else "me"
        return self._get_paginated_list(f"user/{user_id_str}/tracks")

    def add_user_track(self, track_id: int) -> bool:
        """
        Add a track to the user's library.

        :param track_id: the ID of the track to add.
        :return: boolean whether the operation succeeded.
        """
        return self.request("POST", "user/me/tracks", track_id=track_id)

    def remove_user_track(self, track_id: int) -> bool:
        """
        Remove a track from the user's library.

        :param track_id: the ID of the track to remove.
        :return: boolean whether the operation succeeded.
        """
        return self.request("DELETE", "user/me/tracks", track_id=track_id)

    def _search(
        self,
        path: str,
        query: str = "",
        strict: bool | None = None,
        ordering: str | None = None,
        **advanced_params: str | int | None,
    ) -> PaginatedList:
        """
        Build the ``q`` query string and call a ``search/...`` endpoint.

        Advanced parameters are rendered as ``name:"value"`` clauses and
        appended to the free-text query.
        """
        optional_params = {}
        if strict is True:
            # The API expects the literal string "on" to disable fuzzy mode.
            optional_params["strict"] = "on"
        if ordering:
            optional_params["ordering"] = ordering
        query_parts = []
        if query:
            query_parts.append(query)
        for param_name, param_value in advanced_params.items():
            if param_value:
                query_parts.append(f'{param_name}:"{param_value}"')
        return self._get_paginated_list(
            path=f"search/{path}" if path else "search",
            q=" ".join(query_parts),
            **optional_params,
        )

    def search(
        self,
        query: str = "",
        strict: bool | None = None,
        ordering: str | None = None,
        artist: str | None = None,
        album: str | None = None,
        track: str | None = None,
        label: str | None = None,
        dur_min: int | None = None,
        dur_max: int | None = None,
        bpm_min: int | None = None,
        bpm_max: int | None = None,
    ) -> PaginatedList[Track]:
        """
        Search tracks.

        Advanced search is available by either formatting the query yourself or
        by using the dedicated keywords arguments.

        :param query: the query to search for, this is directly passed as q query.
        :param strict: whether to disable fuzzy search and enable strict mode.
        :param ordering: see Deezer API docs for possible values.
        :param artist: parameter for the advanced search feature.
        :param album: parameter for the advanced search feature.
        :param track: parameter for the advanced search feature.
        :param label: parameter for the advanced search feature.
        :param dur_min: parameter for the advanced search feature.
        :param dur_max: parameter for the advanced search feature.
        :param bpm_min: parameter for the advanced search feature.
        :param bpm_max: parameter for the advanced search feature.
        :returns: a :class:`~deezer.pagination.PaginatedList`
                  of :class:`~deezer.resources.Track` instances.
        """
        return self._search(
            "",
            query=query,
            strict=strict,
            ordering=ordering,
            artist=artist,
            album=album,
            track=track,
            label=label,
            dur_min=dur_min,
            dur_max=dur_max,
            bpm_min=bpm_min,
            bpm_max=bpm_max,
        )

    def search_albums(
        self,
        query: str = "",
        strict: bool | None = None,
        ordering: str | None = None,
    ) -> PaginatedList[Album]:
        """
        Search albums matching the given query.

        :param query: the query to search for, this is directly passed as q query.
        :param strict: whether to disable fuzzy search and enable strict mode.
        :param ordering: see Deezer API docs for possible values.
        :return: a :class:`~deezer.pagination.PaginatedList`
                 of :class:`~deezer.resources.Album` instances.
        """
        return self._search(
            path="album",
            query=query,
            strict=strict,
            ordering=ordering,
        )

    def search_artists(
        self,
        query: str = "",
        strict: bool | None = None,
        ordering: str | None = None,
    ) -> PaginatedList[Artist]:
        """
        Search artists matching the given query.

        :param query: the query to search for, this is directly passed as q query.
        :param strict: whether to disable fuzzy search and enable strict mode.
        :param ordering: see Deezer API docs for possible values.
        :return: a :class:`~deezer.pagination.PaginatedList`
                 of :class:`~deezer.resources.Artist` instances.
        """
        return self._search(
            path="artist",
            query=query,
            strict=strict,
            ordering=ordering,
        )
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import tempfile
import time
import unittest
from google.appengine.api import yaml_errors
from google.appengine.ext import db
from mapreduce import errors
from mapreduce import handlers
from mapreduce import status
from testlib import testutil
from mapreduce import test_support
from google.appengine.ext.webapp import mock_webapp
class TestKind(db.Model):
    """Used for testing."""

    # Single string property with a default so entities can be stored
    # without supplying any values.
    foobar = db.StringProperty(default="meep")
def TestMap(entity):
    """No-op mapper handler used as a target when starting test jobs."""
    return None
class MapreduceYamlTest(unittest.TestCase):
    """Testing mapreduce.yaml-related functionality."""

    def set_up_directory_tree(self, dir_tree_contents):
        """Create directory tree from dict of path:contents entries."""
        for full_path, contents in dir_tree_contents.iteritems():
            dir_name = os.path.dirname(full_path)
            if not os.path.isdir(dir_name):
                os.makedirs(dir_name)
            f = open(full_path, 'w')
            f.write(contents)
            f.close()

    def setUp(self):
        """Initialize temporary application variable."""
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        """Remove temporary application directory."""
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    def testFindYamlFile(self):
        """Test if mapreduce.yaml can be found with different app/library trees."""
        # Library installed under its own root, app root holds the yaml file.
        test_status = os.path.join(self.tempdir, "library_root", "google",
                                   "appengine", "ext", "mapreduce", "status.py")
        test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
                                           "mapreduce.yaml")
        test_dict = {
            test_status: "test",
            test_mapreduce_yaml: "test",
        }
        self.set_up_directory_tree(test_dict)
        os.chdir(os.path.dirname(test_mapreduce_yaml))
        yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
        self.assertEqual(test_mapreduce_yaml, yaml_loc)

    def testFindYamlFileSameTree(self):
        """Test if mapreduce.yaml can be found with the same app/library tree."""
        # Library vendored inside the application root itself.
        test_status = os.path.join(self.tempdir, "application_root", "google",
                                   "appengine", "ext", "mapreduce", "status.py")
        test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
                                           "mapreduce.yaml")
        test_dict = {
            test_status: "test",
            test_mapreduce_yaml: "test",
        }
        self.set_up_directory_tree(test_dict)
        os.chdir(os.path.dirname(test_mapreduce_yaml))
        yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
        self.assertEqual(test_mapreduce_yaml, yaml_loc)

    def testParseEmptyFile(self):
        """Parsing empty mapreduce.yaml file."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "")

    def testParse(self):
        """Parsing a single document in mapreduce.yaml."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    params_validator: Validator1\n"
            "    params:\n"
            "    - name: entity_kind\n"
            "      default: Kind1\n"
            "    - name: human_supplied1\n"
            "    - name: human_supplied2\n"
            "- name: Mapreduce2\n"
            "  mapper:\n"
            "    handler: Handler2\n"
            "    input_reader: Reader2\n")
        self.assertTrue(mr_yaml)
        self.assertEquals(2, len(mr_yaml.mapreduce))
        # First job: full set of optional attributes present.
        self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
        self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
        self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
        self.assertEquals("Validator1",
                          mr_yaml.mapreduce[0].mapper.params_validator)
        self.assertEquals(3, len(mr_yaml.mapreduce[0].mapper.params))
        self.assertEquals("entity_kind", mr_yaml.mapreduce[0].mapper.params[0].name)
        self.assertEquals("Kind1", mr_yaml.mapreduce[0].mapper.params[0].default)
        self.assertEquals("human_supplied1",
                          mr_yaml.mapreduce[0].mapper.params[1].name)
        self.assertEquals("human_supplied2",
                          mr_yaml.mapreduce[0].mapper.params[2].name)
        # Second job: minimal required attributes only.
        self.assertEquals("Mapreduce2", mr_yaml.mapreduce[1].name)
        self.assertEquals("Handler2", mr_yaml.mapreduce[1].mapper.handler)
        self.assertEquals("Reader2", mr_yaml.mapreduce[1].mapper.input_reader)

    def testParseOutputWriter(self):
        """Parsing a single document in mapreduce.yaml with output writer."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    output_writer: Writer1\n"
        )
        self.assertTrue(mr_yaml)
        self.assertEquals(1, len(mr_yaml.mapreduce))
        self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
        self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
        self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
        self.assertEquals("Writer1", mr_yaml.mapreduce[0].mapper.output_writer)

    def testParseMissingRequiredAttrs(self):
        """Test parsing with missing required attributes."""
        # Missing input_reader.
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n")
        # Missing handler.
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    input_reader: Reader1\n")

    def testBadValues(self):
        """Tests when some yaml values are of the wrong type."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n"
                          "    params:\n"
                          "    - name: $$Invalid$$\n")

    def testMultipleDocuments(self):
        """Tests when multiple documents are present."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n"
                          "---")

    def testOverlappingNames(self):
        """Tests when there are jobs with the same name."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n")

    def testToDict(self):
        """Tests encoding the MR document as JSON."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    params_validator: Validator1\n"
            "    params:\n"
            "    - name: entity_kind\n"
            "      default: Kind1\n"
            "    - name: human_supplied1\n"
            "    - name: human_supplied2\n"
            "- name: Mapreduce2\n"
            "  mapper:\n"
            "    handler: Handler2\n"
            "    input_reader: Reader2\n")
        all_configs = status.MapReduceYaml.to_dict(mr_yaml)
        # Params without a default are reported with a None value.
        self.assertEquals(
            [
                {
                    'name': 'Mapreduce1',
                    'mapper_params_validator': 'Validator1',
                    'mapper_params': {
                        'entity_kind': 'Kind1',
                        'human_supplied2': None,
                        'human_supplied1': None},
                    'mapper_handler': 'Handler1',
                    'mapper_input_reader': 'Reader1'
                },
                {
                    'mapper_input_reader': 'Reader2',
                    'mapper_handler': 'Handler2',
                    'name': 'Mapreduce2'
                }
            ], all_configs)

    def testToDictOutputWriter(self):
        """Tests encoding the MR document with output writer as JSON."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    output_writer: Writer1\n"
        )
        all_configs = status.MapReduceYaml.to_dict(mr_yaml)
        self.assertEquals(
            [
                {
                    'name': 'Mapreduce1',
                    'mapper_handler': 'Handler1',
                    'mapper_input_reader': 'Reader1',
                    'mapper_output_writer': 'Writer1',
                },
            ], all_configs)
class ResourceTest(testutil.HandlerTestBase):
    """Tests for the resource handler."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        self.handler = status.ResourceHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/path"

    def testPaths(self):
        """Tests that paths are accessible."""
        self.handler.get("status")
        self.assertTrue(self.handler.response.out.getvalue().startswith(
            "<!DOCTYPE html>"))
        self.assertEquals("text/html",
                          self.handler.response.headers["Content-Type"])
        # Reset the output buffer before fetching the next resource.
        self.handler.response.out.truncate(0)
        self.handler.get("jquery.js")
        self.assertTrue(self.handler.response.out.getvalue().startswith(
            "/*!"))
        self.assertEquals("text/javascript",
                          self.handler.response.headers["Content-Type"])

    def testCachingHeaders(self):
        """Tests that caching headers are correct."""
        self.handler.get("status")
        self.assertEquals("public; max-age=300",
                          self.handler.response.headers["Cache-Control"])

    def testMissing(self):
        """Tests when a resource is requested that doesn't exist."""
        self.handler.get("unknown")
        self.assertEquals(404, self.handler.response.status)
class ListConfigsTest(testutil.HandlerTestBase):
    """Tests for the ListConfigsHandler."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        self.handler = status.ListConfigsHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/path"
        # The handler requires this header as a CSRF guard.
        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        del self.handler.request.headers["X-Requested-With"]
        self.handler.get()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests listing available configs."""
        # Swap in a fake yaml loader for the duration of the request.
        old_get_yaml = status.get_mapreduce_yaml
        status.get_mapreduce_yaml = lambda: status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    params_validator: Validator1\n"
            "    params:\n"
            "    - name: entity_kind\n"
            "      default: Kind1\n"
            "    - name: human_supplied1\n"
            "    - name: human_supplied2\n"
            "- name: Mapreduce2\n"
            "  mapper:\n"
            "    handler: Handler2\n"
            "    input_reader: Reader2\n"
            "  params_validator: MapreduceValidator\n"
            "  params:\n"
            "  - name: foo\n"
            "    value: bar\n")
        try:
            self.handler.get()
        finally:
            status.get_mapreduce_yaml = old_get_yaml

        self.assertEquals(
            {u'configs': [
                {u'mapper_params_validator': u'Validator1',
                 u'mapper_params': {
                     u'entity_kind': u'Kind1',
                     u'human_supplied2': None,
                     u'human_supplied1': None},
                 u'mapper_input_reader': u'Reader1',
                 u'mapper_handler': u'Handler1',
                 u'name': u'Mapreduce1'},
                {u'mapper_input_reader': u'Reader2',
                 u'mapper_handler': u'Handler2',
                 u'name': u'Mapreduce2',
                 u'params': {
                     u'foo': u'bar',},
                }]},
            json.loads(self.handler.response.out.getvalue()))
        self.assertEquals("text/javascript",
                          self.handler.response.headers["Content-Type"])
class ListJobsTest(testutil.HandlerTestBase):
    """Tests listing active and inactive jobs."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        # Handler used to start jobs that the list handler will then report.
        self.start = handlers.StartJobHandler()
        self.start.initialize(mock_webapp.MockRequest(),
                              mock_webapp.MockResponse())
        self.start.request.path = "/mapreduce/command/start"
        self.start.request.set(
            "mapper_input_reader",
            "mapreduce.input_readers.DatastoreInputReader")
        self.start.request.set("mapper_handler", "__main__.TestMap")
        self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
        self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"

        self.handler = status.ListJobsHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/list"
        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        TestKind().put()

        del self.start.request.headers["X-Requested-With"]
        self.start.post()
        self.assertEquals(403, self.start.response.status)

        del self.handler.request.headers["X-Requested-With"]
        self.handler.get()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests when there are fewer than the max results to render."""
        TestKind().put()
        self.start.request.set("name", "my job 1")
        self.start.post()
        # Sleeps keep the start timestamps distinct so ordering is stable.
        time.sleep(.1)
        self.start.request.set("name", "my job 2")
        self.start.post()
        time.sleep(.1)
        self.start.request.set("name", "my job 3")
        self.start.post()

        self.handler.get()
        result = json.loads(self.handler.response.out.getvalue())
        expected_args = set([
            "active",
            "active_shards",
            "chart_url",
            "chart_width",
            "mapreduce_id",
            "name",
            "shards",
            "start_timestamp_ms",
            "updated_timestamp_ms",
        ])
        # Jobs are listed newest first.
        self.assertEquals(3, len(result["jobs"]))
        self.assertEquals("my job 3", result["jobs"][0]["name"])
        self.assertEquals("my job 2", result["jobs"][1]["name"])
        self.assertEquals("my job 1", result["jobs"][2]["name"])
        self.assertEquals(expected_args, set(result["jobs"][0].keys()))
        self.assertEquals(expected_args, set(result["jobs"][1].keys()))
        self.assertEquals(expected_args, set(result["jobs"][2].keys()))

    def testCursor(self):
        """Tests when a job cursor is present."""
        TestKind().put()
        self.start.request.set("name", "my job 1")
        self.start.post()
        time.sleep(.1)  # Can not start two jobs before time advances
        self.start.request.set("name", "my job 2")
        self.start.post()

        # First page of one result should come back with a cursor.
        self.handler.request.set("count", "1")
        self.handler.get()
        result = json.loads(self.handler.response.out.getvalue())
        self.assertEquals(1, len(result["jobs"]))
        self.assertTrue("cursor" in result)

        # Second page consumes the cursor; no further pages expected.
        self.handler.response.out.truncate(0)
        self.handler.request.set("count", "1")
        self.handler.request.set("cursor", result['cursor'])
        self.handler.get()
        result2 = json.loads(self.handler.response.out.getvalue())
        self.assertEquals(1, len(result2["jobs"]))
        self.assertFalse("cursor" in result2)

    def testNoJobs(self):
        """Tests when there are no jobs."""
        self.handler.get()
        result = json.loads(self.handler.response.out.getvalue())
        self.assertEquals({'jobs': []}, result)
class GetJobDetailTest(testutil.HandlerTestBase):
    """Tests listing job status detail."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        # Seed the datastore so the job has entities to map over.
        for _ in range(100):
            TestKind().put()
        self.start = handlers.StartJobHandler()
        self.start.initialize(mock_webapp.MockRequest(),
                              mock_webapp.MockResponse())
        self.start.request.path = "/mapreduce/command/start"
        self.start.request.set("name", "my job 1")
        self.start.request.set(
            "mapper_input_reader",
            "mapreduce.input_readers.DatastoreInputReader")
        self.start.request.set("mapper_handler", "__main__.TestMap")
        self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
        self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
        self.start.post()
        result = json.loads(self.start.response.out.getvalue())
        self.mapreduce_id = result["mapreduce_id"]
        self.handler = status.GetJobDetailHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/list"
        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def KickOffMapreduce(self):
        """Executes pending kickoff task."""
        test_support.execute_all_tasks(self.taskqueue)

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        del self.handler.request.headers["X-Requested-With"]
        self.handler.get()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests getting the job details."""
        self.KickOffMapreduce()
        self.handler.request.set("mapreduce_id", self.mapreduce_id)
        self.handler.get()
        result = json.loads(self.handler.response.out.getvalue())

        expected_keys = set([
            "active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
            "name", "result_status", "shards", "start_timestamp_ms",
            "updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
        expected_shard_keys = set([
            "active", "counters", "last_work_item", "result_status",
            "shard_description", "shard_id", "shard_number",
            "updated_timestamp_ms"])

        self.assertEquals(expected_keys, set(result.keys()))
        self.assertEquals(8, len(result["shards"]))
        self.assertEquals(expected_shard_keys, set(result["shards"][0].keys()))

    def testBeforeKickOff(self):
        """Tests getting the job details."""
        # No kickoff here: shard info is not populated yet.
        self.handler.request.set("mapreduce_id", self.mapreduce_id)
        self.handler.get()
        result = json.loads(self.handler.response.out.getvalue())

        expected_keys = set([
            "active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
            "name", "result_status", "shards", "start_timestamp_ms",
            "updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])

        self.assertEquals(expected_keys, set(result.keys()))

    def testBadJobId(self):
        """Tests when an invalid job ID is supplied."""
        self.handler.request.set("mapreduce_id", "does not exist")
        self.handler.get()
        result = json.loads(self.handler.response.out.getvalue())
        self.assertEquals(
            {"error_message": "\"Could not find job with ID 'does not exist'\"",
             "error_class": "KeyError"},
            result)
# TODO(user): Add tests for abort
# TODO(user): Add tests for cleanup
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module detects whether third-party libraries, utilized by third-party
drivers, are present on the system. If they are not, it mocks them and tinkers
with sys.modules so that the drivers can be loaded by unit tests, and the unit
tests can continue to test the functionality of those drivers without the
respective external libraries' actually being present.
Any external library required by a third-party driver should be mocked here.
Current list of mocked libraries:
- ipminative
- proliantutils
- pysnmp
- scciclient
- oneview_client
- pywsman
- python-dracclient
"""
import sys
import mock
from oslo_utils import importutils
import six
from ironic.drivers.modules import ipmitool
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
# IPMITool driver checks the system for presence of 'ipmitool' binary during
# __init__. We bypass that check in order to run the unit tests, which do not
# depend on 'ipmitool' being on the system.
ipmitool.TIMING_SUPPORT = False
ipmitool.DUAL_BRIDGE_SUPPORT = False
ipmitool.SINGLE_BRIDGE_SUPPORT = False
# Mock out pyghmi (used by the ipminative driver) when it is not installed.
pyghmi = importutils.try_import("pyghmi")
if not pyghmi:
    p = mock.MagicMock(spec_set=mock_specs.PYGHMI_SPEC)
    p.exceptions = mock.MagicMock(spec_set=mock_specs.PYGHMI_EXC_SPEC)
    # Must be a real exception class so driver `except` clauses still work.
    p.exceptions.IpmiException = Exception
    p.ipmi = mock.MagicMock(spec_set=mock_specs.PYGHMI_IPMI_SPEC)
    p.ipmi.command = mock.MagicMock(spec_set=mock_specs.PYGHMI_IPMICMD_SPEC)
    p.ipmi.command.Command = mock.MagicMock(spec_set=[])
    # Register the mock package and each submodule so normal imports resolve.
    sys.modules['pyghmi'] = p
    sys.modules['pyghmi.exceptions'] = p.exceptions
    sys.modules['pyghmi.ipmi'] = p.ipmi
    sys.modules['pyghmi.ipmi.command'] = p.ipmi.command
    # FIXME(deva): the next line is a hack, because several unit tests
    # actually depend on this particular string being present
    # in pyghmi.ipmi.command.boot_devices
    p.ipmi.command.boot_devices = {'pxe': 4}
# If the driver was imported before the mock was installed, re-import it so
# it binds to the mocked library.
if 'ironic.drivers.modules.ipminative' in sys.modules:
    six.moves.reload_module(sys.modules['ironic.drivers.modules.ipminative'])
# Mock out proliantutils (used by the iLO drivers) when it is not installed.
proliantutils = importutils.try_import('proliantutils')
if not proliantutils:
    proliantutils = mock.MagicMock(spec_set=mock_specs.PROLIANTUTILS_SPEC)
    sys.modules['proliantutils'] = proliantutils
    sys.modules['proliantutils.ilo'] = proliantutils.ilo
    sys.modules['proliantutils.ilo.client'] = proliantutils.ilo.client
    sys.modules['proliantutils.exception'] = proliantutils.exception
    sys.modules['proliantutils.utils'] = proliantutils.utils
    proliantutils.utils.process_firmware_image = mock.MagicMock()
    # Exception attributes must be real classes so they can be raised/caught.
    proliantutils.exception.IloError = type('IloError', (Exception,), {})
    command_exception = type('IloCommandNotSupportedError', (Exception,), {})
    proliantutils.exception.IloCommandNotSupportedError = command_exception
    proliantutils.exception.InvalidInputError = type(
        'InvalidInputError', (Exception,), {})
    proliantutils.exception.ImageExtractionFailed = type(
        'ImageExtractionFailed', (Exception,), {})
# Reload the iLO drivers if they were imported before the mock existed.
if 'ironic.drivers.ilo' in sys.modules:
    six.moves.reload_module(sys.modules['ironic.drivers.ilo'])
# Mock out oneview_client (python-oneviewclient) when it is not installed.
oneview_client = importutils.try_import('oneview_client')
if not oneview_client:
    oneview_client = mock.MagicMock(spec_set=mock_specs.ONEVIEWCLIENT_SPEC)
    sys.modules['oneview_client'] = oneview_client
    sys.modules['oneview_client.client'] = oneview_client.client
    # Power-state constants consumed by the OneView power interface.
    states = mock.MagicMock(
        spec_set=mock_specs.ONEVIEWCLIENT_STATES_SPEC,
        ONEVIEW_POWER_OFF='Off',
        ONEVIEW_POWERING_OFF='PoweringOff',
        ONEVIEW_POWER_ON='On',
        ONEVIEW_POWERING_ON='PoweringOn',
        ONEVIEW_RESETTING='Resetting',
        ONEVIEW_ERROR='error')
    sys.modules['oneview_client.states'] = states
    sys.modules['oneview_client.exceptions'] = oneview_client.exceptions
    sys.modules['oneview_client.utils'] = oneview_client.utils
    # Must be a real exception class so `except` clauses still work.
    oneview_client.exceptions.OneViewException = type('OneViewException',
                                                      (Exception,), {})
    sys.modules['oneview_client.models'] = oneview_client.models
oneview_client_module = importutils.try_import('oneview_client.client')
# NOTE(vdrok): Always mock the oneview client, as it tries to establish
# connection to oneview right in __init__, and stevedore does not seem to care
# about mocks when it loads a module in mock_the_extension_manager
sys.modules['oneview_client.client'].Client = mock.MagicMock(
    spec_set=mock_specs.ONEVIEWCLIENT_CLIENT_CLS_SPEC
)
# Reload the driver module if it was imported before the mock was installed.
# The guard must test the same key that is reloaded: the previous code
# checked 'ironic.drivers.oneview' but reloaded
# 'ironic.drivers.modules.oneview', which could raise KeyError (or silently
# skip the reload) — the other driver reload guards in this module all test
# the exact module they reload.
if 'ironic.drivers.modules.oneview' in sys.modules:
    six.moves.reload_module(sys.modules['ironic.drivers.modules.oneview'])
# attempt to load the external 'python-dracclient' library, which is required
# by the optional drivers.modules.drac module
dracclient = importutils.try_import('dracclient')
if not dracclient:
    dracclient = mock.MagicMock(spec_set=mock_specs.DRACCLIENT_SPEC)
    dracclient.client = mock.MagicMock(
        spec_set=mock_specs.DRACCLIENT_CLIENT_MOD_SPEC)
    # Power-state constants consumed by the DRAC power interface.
    dracclient.constants = mock.MagicMock(
        spec_set=mock_specs.DRACCLIENT_CONSTANTS_MOD_SPEC,
        POWER_OFF=mock.sentinel.POWER_OFF,
        POWER_ON=mock.sentinel.POWER_ON,
        REBOOT=mock.sentinel.REBOOT)
    sys.modules['dracclient'] = dracclient
    sys.modules['dracclient.client'] = dracclient.client
    sys.modules['dracclient.constants'] = dracclient.constants
    sys.modules['dracclient.exceptions'] = dracclient.exceptions
    # Must be a real exception class so `except` clauses still work.
    dracclient.exceptions.BaseClientException = type('BaseClientException',
                                                     (Exception,), {})
# Now that the external library has been mocked, if anything had already
# loaded any of the drivers, reload them.
if 'ironic.drivers.modules.drac' in sys.modules:
    six.moves.reload_module(sys.modules['ironic.drivers.modules.drac'])
# attempt to load the external 'pysnmp' library, which is required by
# the optional drivers.modules.snmp module
pysnmp = importutils.try_import("pysnmp")
if not pysnmp:
    pysnmp = mock.MagicMock(spec_set=mock_specs.PYWSNMP_SPEC)
    sys.modules["pysnmp"] = pysnmp
    sys.modules["pysnmp.entity"] = pysnmp.entity
    sys.modules["pysnmp.entity.rfc3413"] = pysnmp.entity.rfc3413
    sys.modules["pysnmp.entity.rfc3413.oneliner"] = (
        pysnmp.entity.rfc3413.oneliner)
    sys.modules["pysnmp.entity.rfc3413.oneliner.cmdgen"] = (
        pysnmp.entity.rfc3413.oneliner.cmdgen)
    sys.modules["pysnmp.error"] = pysnmp.error
    # Must be a real exception class so `except` clauses still work.
    pysnmp.error.PySnmpError = Exception
    sys.modules["pysnmp.proto"] = pysnmp.proto
    sys.modules["pysnmp.proto.rfc1902"] = pysnmp.proto.rfc1902
    # Patch the RFC1902 integer class with a python int
    pysnmp.proto.rfc1902.Integer = int
# if anything has loaded the snmp driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.snmp' in sys.modules:
    six.moves.reload_module(sys.modules['ironic.drivers.modules.snmp'])
# attempt to load the external 'scciclient' library, which is required by
# the optional drivers.modules.irmc module
scciclient = importutils.try_import('scciclient')
if not scciclient:
    mock_scciclient = mock.MagicMock(spec_set=mock_specs.SCCICLIENT_SPEC)
    sys.modules['scciclient'] = mock_scciclient
    sys.modules['scciclient.irmc'] = mock_scciclient.irmc
    # Power/media constants consumed by the iRMC power and boot interfaces.
    sys.modules['scciclient.irmc.scci'] = mock.MagicMock(
        spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC,
        POWER_OFF=mock.sentinel.POWER_OFF,
        POWER_ON=mock.sentinel.POWER_ON,
        POWER_RESET=mock.sentinel.POWER_RESET,
        MOUNT_CD=mock.sentinel.MOUNT_CD,
        UNMOUNT_CD=mock.sentinel.UNMOUNT_CD,
        MOUNT_FD=mock.sentinel.MOUNT_FD,
        UNMOUNT_FD=mock.sentinel.UNMOUNT_FD)
# if anything has loaded the iRMC driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.irmc' in sys.modules:
    six.moves.reload_module(sys.modules['ironic.drivers.modules.irmc'])
# install mock object to prevent 'iscsi_irmc' and 'agent_irmc' from
# checking whether NFS/CIFS share file system is mounted or not.
irmc_boot = importutils.import_module(
    'ironic.drivers.modules.irmc.boot')
# Keep the original around so tests can restore/call it if needed.
irmc_boot.check_share_fs_mounted_orig = irmc_boot.check_share_fs_mounted
irmc_boot.check_share_fs_mounted_patcher = mock.patch(
    'ironic.drivers.modules.irmc.boot.check_share_fs_mounted')
irmc_boot.check_share_fs_mounted_patcher.return_value = None
# Mock out ironic-inspector-client when it is not installed.
ironic_inspector_client = importutils.try_import('ironic_inspector_client')
if not ironic_inspector_client:
    ironic_inspector_client = mock.MagicMock(
        spec_set=mock_specs.IRONIC_INSPECTOR_CLIENT_SPEC)
    ironic_inspector_client.ClientV1 = mock_specs.InspectorClientV1Specs
    sys.modules['ironic_inspector_client'] = ironic_inspector_client
    if 'ironic.drivers.modules.inspector' in sys.modules:
        six.moves.reload_module(
            sys.modules['ironic.drivers.modules.inspector'])
class MockKwargsException(Exception):
    """Exception whose constructor captures arbitrary keyword arguments.

    Used to imitate vendor SDK exception types that are raised with
    keyword arguments (see the UcsSdk exception mocks below).
    """

    def __init__(self, *args, **kwargs):
        self.kwargs = kwargs
        super(MockKwargsException, self).__init__(*args)
# Mock out UcsSdk (Cisco UCS drivers) when it is not installed.
ucssdk = importutils.try_import('UcsSdk')
if not ucssdk:
    ucssdk = mock.MagicMock()
    sys.modules['UcsSdk'] = ucssdk
    sys.modules['UcsSdk.utils'] = ucssdk.utils
    sys.modules['UcsSdk.utils.power'] = ucssdk.utils.power
    sys.modules['UcsSdk.utils.management'] = ucssdk.utils.management
    sys.modules['UcsSdk.utils.exception'] = ucssdk.utils.exception
    # Real exception classes that also record kwargs, matching how the
    # drivers raise them.
    ucssdk.utils.exception.UcsOperationError = (
        type('UcsOperationError', (MockKwargsException,), {}))
    ucssdk.utils.exception.UcsConnectionError = (
        type('UcsConnectionError', (MockKwargsException,), {}))
if 'ironic.drivers.modules.ucs' in sys.modules:
    six.moves.reload_module(
        sys.modules['ironic.drivers.modules.ucs'])
# Mock out ImcSdk (Cisco IMC driver) when it is not installed.
imcsdk = importutils.try_import('ImcSdk')
if not imcsdk:
    imcsdk = mock.MagicMock()
    # Must be a real exception class so `except` clauses still work.
    imcsdk.ImcException = Exception
    sys.modules['ImcSdk'] = imcsdk
if 'ironic.drivers.modules.cimc' in sys.modules:
    six.moves.reload_module(
        sys.modules['ironic.drivers.modules.cimc'])
|
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import struct
import six
from . import packet_base
from . import packet_utils
from ryu.lib import stringify
# ICMP message types (RFC 792).
ICMP_ECHO_REPLY = 0
ICMP_DEST_UNREACH = 3
ICMP_SRC_QUENCH = 4
ICMP_REDIRECT = 5
ICMP_ECHO_REQUEST = 8
ICMP_TIME_EXCEEDED = 11
# ICMP code values used with the types above.
ICMP_ECHO_REPLY_CODE = 0
ICMP_HOST_UNREACH_CODE = 1
ICMP_PORT_UNREACH_CODE = 3
ICMP_TTL_EXPIRED_CODE = 0
class icmp(packet_base.PacketBase):
    """ICMP (RFC 792) header encoder/decoder class.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== ====================
    Attribute      Description
    ============== ====================
    type           Type
    code           Code
    csum           CheckSum \
                   (0 means automatically-calculate when encoding)
    data           Payload. \
                   Either a bytearray, or \
                   ryu.lib.packet.icmp.echo or \
                   ryu.lib.packet.icmp.dest_unreach or \
                   ryu.lib.packet.icmp.TimeExceeded object \
                   NOTE for icmp.echo: \
                   This includes "unused" 16 bits and the following \
                   "Internet Header + 64 bits of Original Data Datagram" of \
                   the ICMP header. \
                   NOTE for icmp.dest_unreach and icmp.TimeExceeded: \
                   This includes "unused" 8 or 24 bits and the following \
                   "Internet Header + leading octets of original datagram" \
                   of the original packet.
    ============== ====================
    """

    _PACK_STR = '!BBH'
    _MIN_LEN = struct.calcsize(_PACK_STR)
    _ICMP_TYPES = {}

    @staticmethod
    def register_icmp_type(*args):
        """Class decorator that registers a payload class for ICMP types."""
        def _register(payload_cls):
            for icmp_type in args:
                icmp._ICMP_TYPES[icmp_type] = payload_cls
            return payload_cls
        return _register

    def __init__(self, type_=ICMP_ECHO_REQUEST, code=0, csum=0, data=b''):
        super(icmp, self).__init__()
        self.type = type_
        self.code = code
        self.csum = csum
        self.data = data

    @classmethod
    def parser(cls, buf):
        """Decode an ICMP header (and payload, if any) from *buf*."""
        type_, code, csum = struct.unpack_from(cls._PACK_STR, buf)
        msg = cls(type_, code, csum)
        trailing = buf[cls._MIN_LEN:]
        if trailing:
            payload_cls = cls._ICMP_TYPES.get(type_)
            if payload_cls is not None:
                msg.data = payload_cls.parser(buf, cls._MIN_LEN)
            else:
                msg.data = trailing
        return msg, None, None

    def serialize(self, payload, prev):
        """Encode this header; fills in the checksum when csum is 0."""
        hdr = bytearray(struct.pack(icmp._PACK_STR, self.type,
                                    self.code, self.csum))
        if not self.data:
            # An absent payload is replaced by a default echo payload.
            self.data = echo()
            hdr += self.data.serialize()
        elif self.type in icmp._ICMP_TYPES:
            assert isinstance(self.data, _ICMPv4Payload)
            hdr += self.data.serialize()
        else:
            hdr += self.data
        if self.csum == 0:
            self.csum = packet_utils.checksum(hdr)
            struct.pack_into('!H', hdr, 2, self.csum)
        return hdr

    def __len__(self):
        return self._MIN_LEN + len(self.data)
@six.add_metaclass(abc.ABCMeta)
class _ICMPv4Payload(stringify.StringifyMixin):
    """
    Base class for the payload of ICMPv4 packet.

    Concrete subclasses in this module (echo, dest_unreach, TimeExceeded)
    provide ``parser``, ``serialize`` and ``__len__``.
    """
@icmp.register_icmp_type(ICMP_ECHO_REPLY, ICMP_ECHO_REQUEST)
class echo(_ICMPv4Payload):
    """ICMP sub encoder/decoder class for Echo and Echo Reply messages.

    This is used with ryu.lib.packet.icmp.icmp for
    ICMP Echo and Echo Reply messages.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    .. tabularcolumns:: |l|L|

    ============== ====================
    Attribute      Description
    ============== ====================
    id             Identifier
    seq            Sequence Number
    data           Internet Header + 64 bits of Original Data Datagram
    ============== ====================
    """

    _PACK_STR = '!HH'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, id_=0, seq=0, data=None):
        super(echo, self).__init__()
        self.id = id_
        self.seq = seq
        self.data = data

    @classmethod
    def parser(cls, buf, offset):
        """Decode an echo payload starting at *offset* in *buf*."""
        id_, seq = struct.unpack_from(cls._PACK_STR, buf, offset)
        payload = cls(id_, seq)
        trailing = buf[offset + cls._MIN_LEN:]
        if trailing:
            payload.data = trailing
        return payload

    def serialize(self):
        """Encode this payload to a bytearray."""
        header = bytearray(struct.pack(echo._PACK_STR, self.id, self.seq))
        if self.data is None:
            return header
        return header + self.data

    def __len__(self):
        return self._MIN_LEN + (0 if self.data is None else len(self.data))
@icmp.register_icmp_type(ICMP_DEST_UNREACH)
class dest_unreach(_ICMPv4Payload):
    """ICMP sub encoder/decoder class for Destination Unreachable Message.

    This is used with ryu.lib.packet.icmp.icmp for
    ICMP Destination Unreachable Message.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    [RFC1191] reserves bits for the "Next-Hop MTU" field.
    [RFC4884] introduced 8-bit data length attribute.

    .. tabularcolumns:: |l|p{35em}|

    ============== =====================================================
    Attribute      Description
    ============== =====================================================
    data_len       data length
    mtu            Next-Hop MTU

                   NOTE: This field is required when icmp code is 4

                   code 4 = fragmentation needed and DF set
    data           Internet Header + leading octets of original datagram
    ============== =====================================================
    """

    _PACK_STR = '!xBH'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, data_len=0, mtu=0, data=None):
        super(dest_unreach, self).__init__()
        # RFC 4884 length field is a single byte; reject anything else.
        if not 0 <= data_len <= 255:
            raise ValueError('Specified data length (%d) is invalid.' % data_len)
        self.data_len = data_len
        self.mtu = mtu
        self.data = data

    @classmethod
    def parser(cls, buf, offset):
        """Decode a Destination Unreachable payload at *offset* in *buf*."""
        data_len, mtu = struct.unpack_from(cls._PACK_STR, buf, offset)
        payload = cls(data_len, mtu)
        trailing = buf[offset + cls._MIN_LEN:]
        if trailing:
            payload.data = trailing
        return payload

    def serialize(self):
        """Encode this payload to a bytearray."""
        header = bytearray(struct.pack(dest_unreach._PACK_STR,
                                       self.data_len, self.mtu))
        if self.data is None:
            return header
        return header + self.data

    def __len__(self):
        return self._MIN_LEN + (0 if self.data is None else len(self.data))
@icmp.register_icmp_type(ICMP_TIME_EXCEEDED)
class TimeExceeded(_ICMPv4Payload):
    """ICMP sub encoder/decoder class for Time Exceeded Message.

    This is used with ryu.lib.packet.icmp.icmp for
    ICMP Time Exceeded Message.

    An instance has the following attributes at least.
    Most of them are same to the on-wire counterparts but in host byte order.
    __init__ takes the corresponding args in this order.

    [RFC4884] introduced 8-bit data length attribute.

    .. tabularcolumns:: |l|L|

    ============== ====================
    Attribute      Description
    ============== ====================
    data_len       data length
    data           Internet Header + leading octets of original datagram
    ============== ====================
    """

    _PACK_STR = '!xBxx'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, data_len=0, data=None):
        # Initialize the StringifyMixin base like the sibling payload
        # classes (echo, dest_unreach) do; this call was previously missing.
        super(TimeExceeded, self).__init__()
        if (data_len >= 0) and (data_len <= 255):
            self.data_len = data_len
        else:
            raise ValueError('Specified data length (%d) is invalid.' % data_len)
        self.data = data

    @classmethod
    def parser(cls, buf, offset):
        """Decode a Time Exceeded payload starting at *offset* in *buf*."""
        (data_len, ) = struct.unpack_from(cls._PACK_STR, buf, offset)
        msg = cls(data_len)
        offset += cls._MIN_LEN
        if len(buf) > offset:
            msg.data = buf[offset:]
        return msg

    def serialize(self):
        """Encode this payload to a bytearray."""
        hdr = bytearray(struct.pack(TimeExceeded._PACK_STR, self.data_len))
        if self.data is not None:
            hdr += self.data
        return hdr

    def __len__(self):
        length = self._MIN_LEN
        if self.data is not None:
            length += len(self.data)
        return length
icmp.set_classes(icmp._ICMP_TYPES)
|
|
import secrets
import site
import sys
import textwrap
import pytest
from collections import defaultdict
from dataclasses import dataclass, field, asdict
from pathlib import Path
from typing import Any, List, Dict, Tuple
from unittest.mock import patch
from httpie.context import Environment
from httpie.compat import importlib_metadata
from httpie.status import ExitStatus
from httpie.plugins.manager import (
enable_plugins,
ENTRY_POINT_CLASSES as CLASSES,
)
def make_name() -> str:
    """Generate a random, collision-resistant plugin name ("httpie-<hex>")."""
    suffix = secrets.token_hex(4)
    return f'httpie-{suffix}'
@dataclass
class EntryPoint:
    """A single entry point (name + group) advertised by a dummy plugin."""

    name: str
    group: str

    def dump(self) -> Dict[str, str]:
        """Return the entry point as a plain dictionary."""
        return {'name': self.name, 'group': self.group}
@dataclass
class Plugin:
    """A buildable dummy plugin package used to exercise plugin management."""

    interface: 'Interface'
    name: str = field(default_factory=make_name)
    version: str = '1.0.0'
    entry_points: List[EntryPoint] = field(default_factory=list)

    def build(self) -> None:
        '''
        Create an installable dummy plugin at the given path.

        It will create a setup.py with the specified entry points,
        as well as dummy classes in a python module to imitate
        real plugins.
        '''
        grouped_names = defaultdict(list)
        for entry_point in self.entry_points:
            grouped_names[entry_point.group].append(entry_point.name)

        setup_eps = {}
        for group, names in grouped_names.items():
            setup_eps[group] = [
                f'{name} = {self.import_name}:{name.title()}'
                for name in names
            ]

        self.path.mkdir(parents=True, exist_ok=True)
        with open(self.path / 'setup.py', 'w') as stream:
            stream.write(textwrap.dedent(f'''
            from setuptools import setup

            setup(
                name='{self.name}',
                version='{self.version}',
                py_modules=['{self.import_name}'],
                entry_points={setup_eps!r},
                install_requires=['httpie']
            )
            '''))

        with open(self.path / (self.import_name + '.py'), 'w') as stream:
            stream.write('from httpie.plugins import *\n')
            for group, names in grouped_names.items():
                for name in names:
                    stream.write(
                        f'class {name.title()}({CLASSES[group].__name__}): ...\n'
                    )

    def dump(self) -> Dict[str, Any]:
        """Serializable summary of the plugin (version + entry points)."""
        return {
            'version': self.version,
            'entry_points': [ep.dump() for ep in self.entry_points],
        }

    @property
    def path(self) -> Path:
        """Directory the plugin package is generated into."""
        return self.interface.path / self.name

    @property
    def import_name(self) -> str:
        """Importable module name derived from the distribution name."""
        return self.name.replace('-', '_')
@dataclass
class Interface:
    """Handle for a temporary plugin directory tied to a test Environment."""

    path: Path
    environment: Environment

    def get_plugin(self, target: str) -> importlib_metadata.Distribution:
        """Return the installed distribution for *target* from the plugins dir."""
        with enable_plugins(self.environment.config.plugins_dir):
            return importlib_metadata.distribution(target)

    def is_installed(self, target: str) -> bool:
        """Whether *target* resolves to an installed plugin distribution."""
        try:
            self.get_plugin(target)
        except ModuleNotFoundError:
            return False
        return True

    def make_dummy_plugin(self, build=True, **kwargs) -> Plugin:
        """Create (and, unless build=False, build) a dummy plugin here."""
        kwargs.setdefault('entry_points', [EntryPoint('test', 'httpie.plugins.auth.v1')])
        plugin = Plugin(self, **kwargs)
        if build:
            plugin.build()
        return plugin
def parse_listing(lines: List[str]) -> Dict[str, Any]:
    """Parse `httpie plugins` listing output into a nested dictionary.

    Non-indented lines introduce a plugin ("$plugin ($version)"); indented
    lines add entry points ("$entry_point ($group)") to the current plugin.
    Blank lines are skipped.
    """
    def split_pair(line: str) -> Tuple[str, str]:
        # "<first> (<second>)" -> (first, second without the parentheses)
        first, second = line.strip().split()
        return first, second[1:-1]

    plugins: Dict[str, Any] = {}
    current_plugin = None
    for line in lines:
        if not line.strip():
            continue
        if line[0].isspace():
            # Indented: an entry point belonging to the last-seen plugin.
            assert current_plugin is not None
            name, group = split_pair(line)
            plugins[current_plugin]['entry_points'].append({
                'name': name,
                'group': group,
            })
        else:
            current_plugin, version = split_pair(line)
            plugins[current_plugin] = {
                'version': version,
                'entry_points': [],
            }
    return plugins
@pytest.fixture(scope='function')
def interface(tmp_path):
    """A fresh Interface rooted inside the test's temporary directory."""
    from tests.utils import MockEnvironment
    environment = MockEnvironment(stdout_mode='t')
    return Interface(path=tmp_path / 'interface', environment=environment)
@pytest.fixture(scope='function')
def dummy_plugin(interface):
    """A single built dummy plugin with the default auth entry point."""
    plugin = interface.make_dummy_plugin()
    return plugin
@pytest.fixture(scope='function')
def broken_plugin(interface):
    """A dummy plugin whose module raises ValueError at import time."""
    plugin = interface.make_dummy_plugin()
    module_path = plugin.path / (plugin.import_name + '.py')
    with open(module_path, 'a') as stream:
        stream.write('raise ValueError("broken plugin")\n')
    return plugin
@pytest.fixture(scope='function')
def dummy_plugins(interface):
    """Three built dummy plugins covering different versions and groups."""
    plugins = [interface.make_dummy_plugin()]
    plugins.append(interface.make_dummy_plugin(version='3.2.0'))
    plugins.append(interface.make_dummy_plugin(entry_points=[
        EntryPoint('test_1', 'httpie.plugins.converter.v1'),
        EntryPoint('test_2', 'httpie.plugins.formatter.v1'),
    ]))
    return plugins
@pytest.fixture
def httpie_plugins(interface):
    """Runner that invokes `httpie plugins ...` against the interface env."""
    from tests.utils import httpie
    from httpie.plugins.registry import plugin_manager

    def runner(*args):
        # Snapshot the registry and strip site-packages from sys.path so
        # plugins installed on the host machine don't show up in the run.
        saved_plugins = plugin_manager.copy()
        filtered_path = set(sys.path).difference(site.getsitepackages())
        with patch('sys.path', list(filtered_path)):
            response = httpie('plugins', *args, env=interface.environment)
        plugin_manager.clear()
        plugin_manager.extend(saved_plugins)
        return response

    return runner
@pytest.fixture
def httpie_plugins_success(httpie_plugins):
    """Like `httpie_plugins`, but asserts success and returns output lines."""
    def runner(*args):
        result = httpie_plugins(*args)
        assert result.exit_status == ExitStatus.SUCCESS
        return result.splitlines()
    return runner
|
|
"""
Module for serialization/deserialization and handling of KNX addresses.
The module can handle:
* individual addresses of devices.
* (logical) group addresses.
* xknx internal group addresses.
The module supports all different writings of group addresses:
* 3rd level: "1/2/3"
* 2nd level: "1/2"
* Free format: "123"
"""
from __future__ import annotations
from abc import ABC
from enum import Enum
from re import compile as re_compile
from typing import Optional, Union
from xknx.exceptions import CouldNotParseAddress
# TODO: typing - remove need for Optional here
# Anything parseable as a (logical) group address.
GroupAddressableType = Optional[Union["GroupAddress", str, int, tuple[int, int]]]
# Anything parseable as an individual (physical) device address.
IndividualAddressableType = Optional[
    Union["IndividualAddress", str, int, tuple[int, int]]
]
# Anything parseable as an xknx-internal group address ("i-..." strings).
InternalGroupAddressableType = Union["InternalGroupAddress", str]
# Any address a device can be configured with.
DeviceAddressableType = Union[GroupAddressableType, InternalGroupAddressableType]
# The parsed result types produced by parse_device_group_address().
DeviceGroupAddress = Union["GroupAddress", "InternalGroupAddress"]
def parse_device_group_address(
    address: DeviceAddressableType,
) -> DeviceGroupAddress:
    """Coerce *address* into a GroupAddress or an InternalGroupAddress.

    Already-parsed addresses are returned unchanged.  Strings that fail to
    parse as a regular group address fall back to internal group addresses;
    other unparseable values re-raise CouldNotParseAddress.
    """
    if isinstance(address, (GroupAddress, InternalGroupAddress)):
        return address
    try:
        return GroupAddress(address)
    except CouldNotParseAddress:
        if not isinstance(address, str):
            raise
        return InternalGroupAddress(address)
def address_tuple_to_int(address: tuple[int, int]) -> int:
    """
    Convert the 2-byte tuple `address` to an integer.

    Each entry must be an int in the range 0..255; anything else raises
    CouldNotParseAddress.
    """
    if not all(
        isinstance(byte, int) and 0 <= byte <= 255 for byte in address
    ):
        raise CouldNotParseAddress(address)
    return (address[0] << 8) + address[1]
class BaseAddress(ABC):
    """Common base for all KNX address types; stores the raw 16-bit value."""

    def __init__(self) -> None:
        """Start with a raw value of zero; subclasses overwrite it."""
        self.raw: int = 0

    def to_knx(self) -> tuple[int, int]:
        """Serialize to KNX/IP raw data as a (high byte, low byte) tuple."""
        return (self.raw >> 8) & 255, self.raw & 255

    def __eq__(self, other: object | None) -> bool:
        """Equal iff *other* is a compatible subclass with the same hash."""
        return isinstance(self, type(other)) and self.__hash__() == other.__hash__()

    def __hash__(self) -> int:
        """Use the raw value as hash so addresses can be used as dict keys."""
        return self.raw
class IndividualAddress(BaseAddress):
    """Class for handling KNX individual (physical) addresses."""

    MAX_AREA = 15
    MAX_MAIN = 15
    MAX_LINE = 255
    ADDRESS_RE = re_compile(
        r"^(?P<area>\d{1,2})\.(?P<main>\d{1,2})\.(?P<line>\d{1,3})$"
    )

    def __init__(self, address: IndividualAddressableType) -> None:
        """Initialize from another address, a string, an int or a 2-tuple."""
        super().__init__()
        if isinstance(address, IndividualAddress):
            self.raw = address.raw
        elif isinstance(address, str):
            # Pure digits are taken as the raw value; otherwise "a.m.l".
            self.raw = (
                int(address) if address.isdigit() else self.__string_to_int(address)
            )
        elif isinstance(address, tuple) and len(address) == 2:
            self.raw = address_tuple_to_int(address)
        elif isinstance(address, int):
            self.raw = address
        elif address is None:
            self.raw = 0
        else:
            raise CouldNotParseAddress(address)
        # Individual addresses are 16 bit wide.
        if self.raw > 65535:
            raise CouldNotParseAddress(address)

    def __string_to_int(self, address: str) -> int:
        """
        Parse "area.main.line" notation and validate each component's range.

        Raises CouldNotParseAddress when the string is malformed or any
        component exceeds its maximum.
        """
        match = self.ADDRESS_RE.match(address)
        if match is None:
            raise CouldNotParseAddress(address)
        area = int(match.group("area"))
        main = int(match.group("main"))
        line = int(match.group("line"))
        if area > self.MAX_AREA or main > self.MAX_MAIN or line > self.MAX_LINE:
            raise CouldNotParseAddress(address)
        return (area << 12) + (main << 8) + line

    @property
    def area(self) -> int:
        """Area part (bits 12-15) of the individual address."""
        return (self.raw >> 12) & self.MAX_AREA

    @property
    def main(self) -> int:
        """Main line part (bits 8-11) of the individual address."""
        return (self.raw >> 8) & self.MAX_MAIN

    @property
    def line(self) -> int:
        """Line/device part (low byte) of the individual address."""
        return self.raw & self.MAX_LINE

    @property
    def is_device(self) -> bool:
        """`True` if the line part is non-zero, i.e. a device address."""
        return bool(self.line)

    @property
    def is_line(self) -> bool:
        """`True` if this address addresses a line rather than a device."""
        return not self.is_device

    def __str__(self) -> str:
        """Return object as in KNX notation (e.g. '1.2.3')."""
        return f"{self.area}.{self.main}.{self.line}"

    def __repr__(self) -> str:
        """Return this object as parsable string."""
        return f'IndividualAddress("{self}")'
class GroupAddressType(Enum):
    """
    Possible types of `GroupAddress`.

    KNX knows three types of group addresses:
    * FREE, an integer or hex representation
    * SHORT, a representation like '1/123', without middle groups
    * LONG, a representation like '1/2/34', with middle groups
    """

    FREE = 0  # flat 16-bit value
    SHORT = 2  # main/sub
    LONG = 3  # main/middle/sub
class GroupAddress(BaseAddress):
    """Class for handling KNX group addresses."""

    MAX_MAIN = 31
    MAX_MIDDLE = 7
    MAX_SUB_LONG = 255
    MAX_SUB_SHORT = 2047
    MAX_FREE = 65535
    ADDRESS_RE = re_compile(
        r"^(?P<main>\d{1,2})(/(?P<middle>\d{1,2}))?/(?P<sub>\d{1,4})$"
    )

    def __init__(
        self,
        address: GroupAddressableType,
        levels: GroupAddressType = GroupAddressType.LONG,
    ) -> None:
        """Initialize from another address, a string, an int or a 2-tuple."""
        super().__init__()
        self.levels = levels
        if isinstance(address, GroupAddress):
            self.raw = address.raw
        elif isinstance(address, str):
            # Pure digits are taken as the raw value; otherwise "m[/mi]/s".
            self.raw = (
                int(address) if address.isdigit() else self.__string_to_int(address)
            )
        elif isinstance(address, tuple) and len(address) == 2:
            self.raw = address_tuple_to_int(address)
        elif isinstance(address, int):
            self.raw = address
        elif address is None:
            self.raw = 0
        else:
            raise CouldNotParseAddress(address)
        # Group addresses are 16 bit wide.
        if self.raw > 65535:
            raise CouldNotParseAddress(address)

    def __string_to_int(self, address: str) -> int:
        """
        Parse "main/middle/sub" or "main/sub" notation with range validation.

        Raises CouldNotParseAddress when the string is malformed or any
        component exceeds its maximum.
        """
        match = self.ADDRESS_RE.match(address)
        if match is None:
            raise CouldNotParseAddress(address)
        main = int(match.group("main"))
        raw_middle = match.group("middle")
        sub = int(match.group("sub"))
        if main > self.MAX_MAIN:
            raise CouldNotParseAddress(address)
        if raw_middle is None:
            # 2-level notation: the sub group occupies the low 11 bits.
            if sub > self.MAX_SUB_SHORT:
                raise CouldNotParseAddress(address)
            return (main << 11) + sub
        middle = int(raw_middle)
        if middle > self.MAX_MIDDLE or sub > self.MAX_SUB_LONG:
            raise CouldNotParseAddress(address)
        return (main << 11) + (middle << 8) + sub

    @property
    def main(self) -> int | None:
        """Main group (bits 11-15), or `None` for FREE addressing."""
        if self.levels == GroupAddressType.FREE:
            return None
        return (self.raw >> 11) & self.MAX_MAIN

    @property
    def middle(self) -> int | None:
        """Middle group (bits 8-10), or `None` unless LONG addressing."""
        if self.levels != GroupAddressType.LONG:
            return None
        return (self.raw >> 8) & self.MAX_MIDDLE

    @property
    def sub(self) -> int:
        """Sub group; its bit width depends on the addressing style."""
        if self.levels == GroupAddressType.SHORT:
            return self.raw & self.MAX_SUB_SHORT
        if self.levels == GroupAddressType.LONG:
            return self.raw & self.MAX_SUB_LONG
        return self.raw

    def __str__(self) -> str:
        """
        Return object as in KNX notation (e.g. '1/2/3').

        Honors the used `GroupAddressType` of this group.
        """
        if self.levels == GroupAddressType.LONG:
            return f"{self.main}/{self.middle}/{self.sub}"
        if self.levels == GroupAddressType.SHORT:
            return f"{self.main}/{self.sub}"
        return f"{self.sub}"

    def __repr__(self) -> str:
        """Return object as parsable string."""
        return f'GroupAddress("{self}")'
class InternalGroupAddress:
    """Class for handling addresses used internally in xknx devices only."""

    def __init__(self, address: str | InternalGroupAddress) -> None:
        """Initialize from a string like "i-name", "i_name" or "iname"."""
        self.address: str
        if isinstance(address, InternalGroupAddress):
            self.address = address.address
            return
        if not isinstance(address, str):
            raise CouldNotParseAddress(address)
        if len(address) < 2 or address[0].lower() != "i":
            raise CouldNotParseAddress(address)
        # An optional "-" or "_" separator may follow the "i" prefix.
        prefix_length = 2 if address[1] in "-_" else 1
        self.address = address[prefix_length:].strip()
        if not self.address:
            raise CouldNotParseAddress(address)

    def __str__(self) -> str:
        """Return object as readable string (e.g. 'i-123')."""
        return f"i-{self.address}"

    def __repr__(self) -> str:
        """Return object as parsable string."""
        return f'InternalGroupAddress("{self}")'

    def __eq__(self, other: object | None) -> bool:
        """Equal iff *other* is a compatible subclass with the same hash."""
        return isinstance(self, type(other)) and self.__hash__() == other.__hash__()

    def __hash__(self) -> int:
        """Hash the internal address string so it can be used as dict key."""
        return hash(self.address)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import frappe.model.meta
from frappe.permissions import (add_user_permission, remove_user_permission,
clear_user_permissions_for_doctype, get_doc_permissions, add_permission)
from frappe.core.page.permission_manager.permission_manager import update, reset
from frappe.test_runner import make_test_records_for_doctype
from frappe.core.doctype.user_permission.user_permission import clear_user_permissions
test_dependencies = ['Blogger', 'Blog Post', "User", "Contact", "Salutation"]
class TestPermissions(unittest.TestCase):
	"""Exercise role permissions and user-permission restrictions using
	Blog Post / Blogger / Contact test records."""

	def setUp(self):
		frappe.clear_cache(doctype="Blog Post")

		# One-time role setup for the three test users; guarded by a flag so
		# repeated setUp calls don't re-assign roles.
		if not frappe.flags.permission_user_setup_done:
			user = frappe.get_doc("User", "test1@example.com")
			user.add_roles("Website Manager")
			user.add_roles("System Manager")

			user = frappe.get_doc("User", "test2@example.com")
			user.add_roles("Blogger")

			user = frappe.get_doc("User", "test3@example.com")
			user.add_roles("Sales User")
			frappe.flags.permission_user_setup_done = True

		# Start each test from the stock permission setup with no
		# user permissions defined.
		reset('Blogger')
		reset('Blog Post')

		frappe.db.sql('delete from `tabUser Permission`')

		frappe.set_user("test1@example.com")

	def tearDown(self):
		frappe.set_user("Administrator")
		frappe.db.set_value("Blogger", "_Test Blogger 1", "user", None)

		clear_user_permissions_for_doctype("Blog Category")
		clear_user_permissions_for_doctype("Blog Post")
		clear_user_permissions_for_doctype("Blogger")

	@staticmethod
	def set_strict_user_permissions(ignore):
		# NOTE(review): `ignore` is the value (0/1) written to
		# `apply_strict_user_permissions`, not an "ignore" flag; the name is
		# kept for backward compatibility.
		ss = frappe.get_doc("System Settings")
		ss.apply_strict_user_permissions = ignore
		ss.flags.ignore_mandatory = 1
		ss.save()

	def test_basic_permission(self):
		"""A System Manager can read a blog post."""
		post = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertTrue(post.has_permission("read"))

	def test_user_permissions_in_doc(self):
		"""User permission on Blog Category restricts document access."""
		add_user_permission("Blog Category", "_Test Blog Category 1",
			"test2@example.com")

		frappe.set_user("test2@example.com")

		post = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertFalse(post.has_permission("read"))
		self.assertFalse(get_doc_permissions(post).get("read"))

		post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertTrue(post1.has_permission("read"))
		self.assertTrue(get_doc_permissions(post1).get("read"))

	def test_user_permissions_in_report(self):
		"""User permission on Blog Category restricts list/report output."""
		add_user_permission("Blog Category", "_Test Blog Category 1", "test2@example.com")

		frappe.set_user("test2@example.com")
		names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "blog_category"])]

		self.assertTrue("-test-blog-post-1" in names)
		self.assertFalse("-test-blog-post" in names)

	def test_default_values(self):
		"""Link-field defaults are fetched from user permissions."""
		doc = frappe.new_doc("Blog Post")
		self.assertFalse(doc.get("blog_category"))

		# Fetch default based on single user permission
		add_user_permission("Blog Category", "_Test Blog Category 1", "test2@example.com")

		frappe.set_user("test2@example.com")
		doc = frappe.new_doc("Blog Post")
		self.assertEqual(doc.get("blog_category"), "_Test Blog Category 1")

		# Don't fetch default if user permissions is more than 1
		add_user_permission("Blog Category", "_Test Blog Category", "test2@example.com", ignore_permissions=True)
		frappe.clear_cache()
		doc = frappe.new_doc("Blog Post")
		self.assertFalse(doc.get("blog_category"))

		# Fetch user permission set as default from multiple user permission
		add_user_permission("Blog Category", "_Test Blog Category 2", "test2@example.com", ignore_permissions=True, is_default=1)
		frappe.clear_cache()
		doc = frappe.new_doc("Blog Post")
		self.assertEqual(doc.get("blog_category"), "_Test Blog Category 2")

	def test_user_link_match_doc(self):
		"""A user-linked Blogger restricts document reads to matching posts."""
		blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
		blogger.user = "test2@example.com"
		blogger.save()

		frappe.set_user("test2@example.com")

		post = frappe.get_doc("Blog Post", "-test-blog-post-2")
		self.assertTrue(post.has_permission("read"))

		post1 = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(post1.has_permission("read"))

	def test_user_link_match_report(self):
		"""A user-linked Blogger restricts list output to matching posts."""
		blogger = frappe.get_doc("Blogger", "_Test Blogger 1")
		blogger.user = "test2@example.com"
		blogger.save()

		frappe.set_user("test2@example.com")

		names = [d.name for d in frappe.get_list("Blog Post", fields=["name", "owner"])]
		self.assertTrue("-test-blog-post-2" in names)
		self.assertFalse("-test-blog-post-1" in names)

	def test_set_user_permissions(self):
		"""A System Manager may create user permissions."""
		frappe.set_user("test1@example.com")
		add_user_permission("Blog Post", "-test-blog-post", "test2@example.com")

	def test_not_allowed_to_set_user_permissions(self):
		frappe.set_user("test2@example.com")

		# this user can't add user permissions
		self.assertRaises(frappe.PermissionError, add_user_permission,
			"Blog Post", "-test-blog-post", "test2@example.com")

	def test_read_if_explicit_user_permissions_are_set(self):
		self.test_set_user_permissions()

		frappe.set_user("test2@example.com")

		# user can only access permitted blog post
		doc = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertTrue(doc.has_permission("read"))

		# and not this one
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(doc.has_permission("read"))

	def test_not_allowed_to_remove_user_permissions(self):
		self.test_set_user_permissions()

		frappe.set_user("test2@example.com")

		# user cannot remove their own user permissions
		self.assertRaises(frappe.PermissionError, remove_user_permission,
			"Blog Post", "-test-blog-post", "test2@example.com")

	def test_user_permissions_if_applied_on_doc_being_evaluated(self):
		"""Adding a user permission on one doc hides the others."""
		frappe.set_user("test2@example.com")
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertTrue(doc.has_permission("read"))

		frappe.set_user("test1@example.com")
		add_user_permission("Blog Post", "-test-blog-post", "test2@example.com")

		frappe.set_user("test2@example.com")
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(doc.has_permission("read"))

		doc = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertTrue(doc.has_permission("read"))

	def test_set_only_once(self):
		"""A set-only-once field cannot be changed after it has a value."""
		blog_post = frappe.get_meta("Blog Post")
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		doc.db_set('title', 'Old')
		blog_post.get_field("title").set_only_once = 1
		doc.title = "New"
		self.assertRaises(frappe.CannotChangeConstantError, doc.save)
		blog_post.get_field("title").set_only_once = 0

	def test_set_only_once_child_table_rows(self):
		"""Removing rows of a set-only-once child table is rejected."""
		doctype_meta = frappe.get_meta("DocType")
		doctype_meta.get_field("fields").set_only_once = 1
		doc = frappe.get_doc("DocType", "Blog Post")

		# remove last one
		doc.fields = doc.fields[:-1]

		self.assertRaises(frappe.CannotChangeConstantError, doc.save)

		frappe.clear_cache(doctype='DocType')

	def test_set_only_once_child_table_row_value(self):
		"""Changing a value in a set-only-once child table is rejected."""
		doctype_meta = frappe.get_meta("DocType")
		doctype_meta.get_field("fields").set_only_once = 1
		doc = frappe.get_doc("DocType", "Blog Post")

		# change one property from the child table
		doc.fields[-1].fieldtype = 'HTML'

		self.assertRaises(frappe.CannotChangeConstantError, doc.save)

		frappe.clear_cache(doctype='DocType')

	def test_set_only_once_child_table_okay(self):
		"""An unchanged set-only-once child table passes validation."""
		doctype_meta = frappe.get_meta("DocType")
		doctype_meta.get_field("fields").set_only_once = 1
		doc = frappe.get_doc("DocType", "Blog Post")

		doc.load_doc_before_save()
		self.assertFalse(doc.validate_set_only_once())

		frappe.clear_cache(doctype='DocType')

	def test_user_permission_doctypes(self):
		"""Multiple user permissions must all match for access."""
		add_user_permission("Blog Category", "_Test Blog Category 1",
			"test2@example.com")
		add_user_permission("Blogger", "_Test Blogger 1",
			"test2@example.com")

		frappe.set_user("test2@example.com")

		frappe.clear_cache(doctype="Blog Post")

		doc = frappe.get_doc("Blog Post", "-test-blog-post")
		self.assertFalse(doc.has_permission("read"))

		doc = frappe.get_doc("Blog Post", "-test-blog-post-2")
		self.assertTrue(doc.has_permission("read"))

		frappe.clear_cache(doctype="Blog Post")

	def if_owner_setup(self):
		"""Helper: enable if_owner for Blogger role and add user permissions."""
		update('Blog Post', 'Blogger', 0, 'if_owner', 1)

		add_user_permission("Blog Category", "_Test Blog Category 1",
			"test2@example.com")
		add_user_permission("Blogger", "_Test Blogger 1",
			"test2@example.com")

		frappe.clear_cache(doctype="Blog Post")

	def test_insert_if_owner_with_user_permissions(self):
		"""If `If Owner` is checked for a Role, check if that document
		is allowed to be read, updated, submitted, etc. except be created,
		even if the document is restricted based on User Permissions."""
		frappe.delete_doc('Blog Post', '-test-blog-post-title')

		self.if_owner_setup()

		frappe.set_user("test2@example.com")

		doc = frappe.get_doc({
			"doctype": "Blog Post",
			"blog_category": "_Test Blog Category",
			"blogger": "_Test Blogger 1",
			"title": "_Test Blog Post Title",
			"content": "_Test Blog Post Content"
		})

		self.assertRaises(frappe.PermissionError, doc.insert)

		frappe.set_user('test1@example.com')
		add_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")

		frappe.set_user("test2@example.com")
		doc.insert()

		frappe.set_user("Administrator")
		remove_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")

		frappe.set_user("test2@example.com")
		doc = frappe.get_doc(doc.doctype, doc.name)
		self.assertTrue(doc.has_permission("read"))
		self.assertTrue(doc.has_permission("write"))
		self.assertFalse(doc.has_permission("create"))

		# delete created record
		frappe.set_user("Administrator")
		frappe.delete_doc('Blog Post', '-test-blog-post-title')

	def test_ignore_user_permissions_if_missing(self):
		"""If there are no user permissions, then allow as per role"""

		add_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")
		frappe.set_user("test2@example.com")

		doc = frappe.get_doc({
			"doctype": "Blog Post",
			"blog_category": "_Test Blog Category 2",
			"blogger": "_Test Blogger 1",
			"title": "_Test Blog Post Title",
			"content": "_Test Blog Post Content"
		})

		self.assertFalse(doc.has_permission("write"))

		frappe.set_user("Administrator")
		remove_user_permission("Blog Category", "_Test Blog Category",
			"test2@example.com")

		frappe.set_user("test2@example.com")
		self.assertTrue(doc.has_permission('write'))

	def test_strict_user_permissions(self):
		"""If `Strict User Permissions` is checked in System Settings,
		show records even if User Permissions are missing for a linked
		doctype"""

		frappe.set_user('Administrator')
		frappe.db.sql('DELETE FROM `tabContact`')

		reset('Salutation')
		reset('Contact')

		make_test_records_for_doctype('Contact', force=True)

		add_user_permission("Salutation", "Mr", "test3@example.com")
		self.set_strict_user_permissions(0)

		allowed_contact = frappe.get_doc('Contact', '_Test Contact For _Test Customer')
		other_contact = frappe.get_doc('Contact', '_Test Contact For _Test Supplier')

		frappe.set_user("test3@example.com")
		self.assertTrue(allowed_contact.has_permission('read'))
		self.assertTrue(other_contact.has_permission('read'))
		self.assertEqual(len(frappe.get_list("Contact")), 2)

		frappe.set_user("Administrator")
		self.set_strict_user_permissions(1)

		frappe.set_user("test3@example.com")
		self.assertTrue(allowed_contact.has_permission('read'))
		self.assertFalse(other_contact.has_permission('read'))
		# BUGFIX: was `assertTrue(len(...), 1)` — the `1` was treated as the
		# assertion *message*, so the check could never fail. Use assertEqual
		# to actually verify that only one Contact is listed.
		self.assertEqual(len(frappe.get_list("Contact")), 1)

		frappe.set_user("Administrator")
		self.set_strict_user_permissions(0)

		clear_user_permissions_for_doctype("Salutation")
		clear_user_permissions_for_doctype("Contact")

	def test_user_permissions_not_applied_if_user_can_edit_user_permissions(self):
		add_user_permission('Blogger', '_Test Blogger 1', 'test1@example.com')

		# test1@example.com has rights to create user permissions
		# so it should not matter if explicit user permissions are not set
		self.assertTrue(frappe.get_doc('Blogger', '_Test Blogger').has_permission('read'))

	def test_user_permission_is_not_applied_if_user_roles_does_not_have_permission(self):
		add_user_permission('Blog Post', '-test-blog-post-1', 'test3@example.com')
		frappe.set_user("test3@example.com")
		doc = frappe.get_doc("Blog Post", "-test-blog-post-1")
		self.assertFalse(doc.has_permission("read"))

		frappe.set_user("Administrator")
		user = frappe.get_doc("User", "test3@example.com")
		user.add_roles("Blogger")
		frappe.set_user("test3@example.com")
		self.assertTrue(doc.has_permission("read"))

		frappe.set_user("Administrator")
		user.remove_roles("Blogger")

	def test_contextual_user_permission(self):
		# should be applicable for across all doctypes
		add_user_permission('Blogger', '_Test Blogger', 'test2@example.com')
		# should be applicable only while accessing Blog Post
		add_user_permission('Blogger', '_Test Blogger 1', 'test2@example.com', applicable_for='Blog Post')
		# should be applicable only while accessing User
		add_user_permission('Blogger', '_Test Blogger 2', 'test2@example.com', applicable_for='User')

		posts = frappe.get_all('Blog Post', fields=['name', 'blogger'])

		# Get all posts for admin
		self.assertEqual(len(posts), 4)

		frappe.set_user('test2@example.com')

		posts = frappe.get_list('Blog Post', fields=['name', 'blogger'])

		# Should get only posts with allowed blogger via user permission
		# only '_Test Blogger', '_Test Blogger 1' are allowed in Blog Post
		self.assertEqual(len(posts), 3)

		for post in posts:
			self.assertIn(post.blogger, ['_Test Blogger', '_Test Blogger 1'], 'A post from {} is not expected.'.format(post.blogger))

	def test_if_owner_permission_overrides_properly(self):
		# check if user is not granted access if the user is not the owner of the doc
		# Blogger has only read access on the blog post unless he is the owner of the blog
		update('Blog Post', 'Blogger', 0, 'if_owner', 1)
		update('Blog Post', 'Blogger', 0, 'read', 1)
		update('Blog Post', 'Blogger', 0, 'write', 1)
		update('Blog Post', 'Blogger', 0, 'delete', 1)

		# currently test2 user has not created any document
		# still he should be able to do get_list query which should
		# not raise permission error but simply return empty list
		frappe.set_user("test2@example.com")
		self.assertEqual(frappe.get_list('Blog Post'), [])

		frappe.set_user("Administrator")

		# creates a custom docperm with just read access
		# now any user can read any blog post (but other rights are limited to the blog post owner)
		add_permission('Blog Post', 'Blogger')
		frappe.clear_cache(doctype="Blog Post")

		frappe.delete_doc('Blog Post', '-test-blog-post-title')

		frappe.set_user("test1@example.com")

		doc = frappe.get_doc({
			"doctype": "Blog Post",
			"blog_category": "_Test Blog Category",
			"blogger": "_Test Blogger 1",
			"title": "_Test Blog Post Title",
			"content": "_Test Blog Post Content"
		})

		doc.insert()

		frappe.set_user("test2@example.com")
		doc = frappe.get_doc(doc.doctype, doc.name)

		self.assertTrue(doc.has_permission("read"))
		self.assertFalse(doc.has_permission("write"))
		self.assertFalse(doc.has_permission("delete"))

		# check if owner of the doc has the access that is available only for the owner of the doc
		frappe.set_user("test1@example.com")
		doc = frappe.get_doc(doc.doctype, doc.name)

		self.assertTrue(doc.has_permission("read"))
		self.assertTrue(doc.has_permission("write"))
		self.assertTrue(doc.has_permission("delete"))

		# delete the created doc
		frappe.delete_doc('Blog Post', '-test-blog-post-title')

	def test_clear_user_permissions(self):
		current_user = frappe.session.user
		frappe.set_user('Administrator')
		clear_user_permissions_for_doctype('Blog Category', 'test2@example.com')
		clear_user_permissions_for_doctype('Blog Post', 'test2@example.com')

		add_user_permission('Blog Post', '-test-blog-post-1', 'test2@example.com')
		add_user_permission('Blog Post', '-test-blog-post-2', 'test2@example.com')
		add_user_permission("Blog Category", '_Test Blog Category 1', 'test2@example.com')

		deleted_user_permission_count = clear_user_permissions('test2@example.com', 'Blog Post')

		self.assertEqual(deleted_user_permission_count, 2)

		blog_post_user_permission_count = frappe.db.count('User Permission', filters={
			'user': 'test2@example.com',
			'allow': 'Blog Post'
		})

		self.assertEqual(blog_post_user_permission_count, 0)

		blog_category_user_permission_count = frappe.db.count('User Permission', filters={
			'user': 'test2@example.com',
			'allow': 'Blog Category'
		})

		self.assertEqual(blog_category_user_permission_count, 1)

		# reset the user
		frappe.set_user(current_user)
|
|
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check for decrease in coverage from 100% of frontend files."""
from __future__ import annotations
import fnmatch
import logging
import os
import re
import sys
from core import python_utils
# Location of the combined karma lcov report, relative to this script's
# working directory.
LCOV_FILE_PATH = os.path.join(os.pardir, 'karma_coverage_reports', 'lcov.info')
# Lcov line prefixes this check cares about: source file, lines hit,
# lines found. NOTE(review): not referenced elsewhere in this script's
# visible code — confirm before removing.
RELEVANT_LCOV_LINE_PREFIXES = ['SF', 'LH', 'LF']
# Glob patterns for paths whose coverage is never checked.
EXCLUDED_DIRECTORIES = [
    'node_modules/*',
    'extensions/classifiers/proto/*'
]
# Contains the names of all files that are not covered 100%.
# This list must be kept up-to-date; the changes (only remove) should be done
# manually.
# Please keep the list in alphabetical order.
# NOTE TO DEVELOPERS: do not add any new files to this list without asking
# @nithusha21 first.
NOT_FULLY_COVERED_FILENAMES = [
    'angular-html-bind.directive.ts',
    'answer-classification.service.ts',
    'App.ts',
    'audio-preloader.service.ts',
    'Base.ts',
    'ck-editor-4-rte.component.ts',
    'ck-editor-4-widgets.initializer.ts',
    'collection-player-page.component.ts',
    'collection.model.ts',
    'contribution-and-review.service.ts',
    'conversation-skin.directive.ts',
    'current-interaction.service.ts',
    'exploration-states.service.ts',
    'expression-evaluator.service.ts',
    'expression-interpolation.service.ts',
    'fatigue-detection.service.ts',
    'google-analytics.initializer.ts',
    'language-util.service.ts',
    'learner-answer-info.service.ts',
    'mathjax-bind.directive.ts',
    'normalize-whitespace-punctuation-and-case.pipe.ts',
    'object-editor.directive.ts',
    'oppia-footer.component.ts',
    'oppia-interactive-music-notes-input.directive.ts',
    'oppia-interactive-pencil-code-editor.directive.ts',
    'oppia-root.directive.ts',
    'parameterize-rule-description.filter.ts',
    'python-program.tokenizer.ts',
    'question-update.service.ts',
    'refresher-exploration-confirmation-modal.service.ts',
    'rule-type-selector.directive.ts',
    'schema-based-custom-viewer.directive.ts',
    'schema-based-html-viewer.directive.ts',
    'schema-based-list-viewer.directive.ts',
    'select2-dropdown.directive.ts',
    'state-card.model.ts',
    'state-content-editor.directive.ts',
    'state-interaction-editor.directive.ts',
    'story-node.model.ts',
    'subtopic.model.ts',
    'translation-file-hash-loader-backend-api.service.ts',
    'truncate-and-capitalize.filter.ts',
    'truncate-and-capitalize.pipe.ts',
    'truncate-input-based-on-interaction-answer-type.filter.ts',
    'truncate.filter.ts',
    # Please don't try to cover `unit-test-utils.ajs.ts` file.
    'unit-test-utils.ajs.ts',
    'voiceover-recording.service.ts',
]
class LcovStanzaRelevantLines:
    """Gets the relevant lines from a lcov stanza."""

    def __init__(self, stanza):
        """Initialize the object which provides relevant data of a lcov
        stanza in order to calculate any decrease in frontend test coverage.

        Args:
            stanza: list(str). Contains all the lines from a lcov stanza.

        Raises:
            Exception. The file_path is empty.
            Exception. Total lines number is not found.
            Exception. Covered lines number is not found.
        """
        sf_match = re.search('SF:(.+)\n', stanza)
        if sf_match is None:
            raise Exception(
                'The test path is empty or null. '
                'It\'s not possible to diff the test coverage correctly.')
        self.file_path = sf_match.group(1)
        # Only the basename is compared against the denylist.
        file_name = os.path.split(self.file_path)[1]
        self.file_name = file_name

        lf_match = re.search(r'LF:(\d+)\n', stanza)
        if lf_match is None:
            raise Exception(
                'It wasn\'t possible to get the total lines of {} file.'
                'It\'s not possible to diff the test coverage correctly.'
                .format(file_name))
        self.total_lines = int(lf_match.group(1))

        lh_match = re.search(r'LH:(\d+)\n', stanza)
        if lh_match is None:
            raise Exception(
                'It wasn\'t possible to get the covered lines of {} file.'
                'It\'s not possible to diff the test coverage correctly.'
                .format(file_name))
        self.covered_lines = int(lh_match.group(1))
def get_stanzas_from_lcov_file():
    """Get all stanzas from a lcov file. The lcov file gather all the frontend
    files that has tests and each one has the following structure:
    TN: test name
    SF: file path
    FNF: total functions
    FNH: functions covered
    LF: total lines
    LH: lines covered
    BRF: total branches
    BRH: branches covered
    end_of_record

    Returns:
        list(LcovStanzaRelevantLines). A list with all stanzas.
    """
    # BUGFIX: the file handle was previously never closed; ensure it is
    # released even if reading fails.
    f = python_utils.open_file(LCOV_FILE_PATH, 'r')
    try:
        lcov_items_list = f.read().split('end_of_record')
    finally:
        f.close()
    stanzas_list = []

    for item in lcov_items_list:
        if item.strip('\n'):
            stanza = LcovStanzaRelevantLines(item)
            stanzas_list.append(stanza)

    return stanzas_list
def check_not_fully_covered_filenames_list_is_sorted():
    """Check if NOT_FULLY_COVERED_FILENAMES list is in alphabetical order."""
    # Compare against a case-insensitively sorted copy; exit non-zero on
    # mismatch so CI fails.
    expected_order = sorted(NOT_FULLY_COVERED_FILENAMES, key=str.lower)
    if NOT_FULLY_COVERED_FILENAMES != expected_order:
        logging.error(
            'The \033[1mNOT_FULLY_COVERED_FILENAMES\033[0m list must be'
            ' kept in alphabetical order.')
        sys.exit(1)
def check_coverage_changes():
    """Checks if the denylist for not fully covered files needs to be changed
    by:
    - File renaming
    - File deletion
    Raises:
        Exception. LCOV_FILE_PATH doesn't exist.
    """
    if not os.path.exists(LCOV_FILE_PATH):
        raise Exception(
            'Expected lcov file to be available at {}, but the'
            ' file does not exist.'.format(LCOV_FILE_PATH))

    stanzas = get_stanzas_from_lcov_file()
    # Files seen in the lcov report are removed from this copy; whatever
    # remains at the end no longer exists (or was renamed).
    remaining_denylisted_files = list(NOT_FULLY_COVERED_FILENAMES)
    # Accumulated human-readable error report; empty string means success.
    errors = ''

    for stanza in stanzas:
        file_name = stanza.file_name
        total_lines = stanza.total_lines
        covered_lines = stanza.covered_lines
        # Skip files in excluded directories (e.g. node_modules).
        if any(fnmatch.fnmatch(
                stanza.file_path, pattern) for pattern in EXCLUDED_DIRECTORIES):
            continue
        if file_name not in remaining_denylisted_files:
            # File is expected to be fully covered.
            if total_lines != covered_lines:
                errors += (
                    '\033[1m{}\033[0m seems to be not completely tested.'
                    ' Make sure it\'s fully covered.\n'.format(file_name))
        else:
            # Denylisted file reached full coverage: ask for manual removal.
            if total_lines == covered_lines:
                errors += (
                    '\033[1m{}\033[0m seems to be fully covered!'
                    ' Before removing it manually from the denylist'
                    ' in the file'
                    ' scripts/check_frontend_test_coverage.py, please'
                    ' make sure you\'ve followed the unit tests rules'
                    ' correctly on:'
                    ' https://github.com/oppia/oppia/wiki/Frontend'
                    '-unit-tests-guide#rules\n'.format(file_name))

            remaining_denylisted_files.remove(file_name)

    # Any leftover entries point at deleted or renamed files.
    if remaining_denylisted_files:
        for test_name in remaining_denylisted_files:
            errors += (
                '\033[1m{}\033[0m is in the frontend test coverage'
                ' denylist but it doesn\'t exist anymore. If you have'
                ' renamed it, please make sure to remove the old file'
                ' name and add the new file name in the denylist in'
                ' the file scripts/check_frontend_test_coverage.py.\n'
                .format(test_name))

    if errors:
        print('------------------------------------')
        print('Frontend Coverage Checks Not Passed.')
        print('------------------------------------')
        logging.error(errors)
        sys.exit(1)
    else:
        print('------------------------------------')
        print('All Frontend Coverage Checks Passed.')
        print('------------------------------------')

    check_not_fully_covered_filenames_list_is_sorted()
def main():
    """Runs all the steps for checking if there is any decrease of 100% covered
    files in the frontend.
    """
    # Exits with a non-zero status (via sys.exit inside the check) on failure.
    check_coverage_changes()
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when check_frontend_test_coverage.py
# is used as a script.
if __name__ == '__main__': # pragma: no cover
    main()
|
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Solr search using the synchronizer, i.e. as it would be used by an user
"""
import logging
import os
import sys
import time
from gridfs import GridFS
from pymongo import MongoClient
from pysolr import Solr, SolrError
sys.path[0:0] = [""]
from tests import solr_pair, unittest
from tests.setup_cluster import ReplicaSet
from tests.util import assert_soon
from mongo_connector.connector import Connector
from mongo_connector.doc_managers.solr_doc_manager import DocManager
from mongo_connector.util import retry_until_ok
class SolrTestCase(unittest.TestCase):
    """Base case holding a shared Solr connection and doc manager."""

    @classmethod
    def setUpClass(cls):
        solr_url = "http://%s/solr" % solr_pair
        cls.solr_conn = Solr(solr_url)
        cls.docman = DocManager(solr_url, auto_commit_interval=0)

    def _search(self, query):
        """Run a streaming search through the doc manager."""
        return self.docman._stream_search(query)

    def _remove(self):
        """Delete every document from the Solr index."""
        self.solr_conn.delete(q="*:*", commit=True)
class TestSolr(SolrTestCase):
""" Tests Solr
"""
@classmethod
def setUpClass(cls):
SolrTestCase.setUpClass()
cls.repl_set = ReplicaSet().start()
cls.conn = cls.repl_set.client()
@classmethod
def tearDownClass(cls):
""" Kills cluster instance
"""
cls.repl_set.stop()
def setUp(self):
self._remove()
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
docman = DocManager('http://%s/solr' % solr_pair,
auto_commit_interval=0)
self.connector = Connector(
mongo_address=self.repl_set.uri,
ns_set=['test.test'],
doc_managers=(docman,),
gridfs_set=['test.test']
)
retry_until_ok(self.conn.test.test.drop)
retry_until_ok(self.conn.test.test.files.drop)
retry_until_ok(self.conn.test.test.chunks.drop)
self._remove()
self.connector.start()
assert_soon(lambda: len(self.connector.shard_set) > 0)
def tearDown(self):
self.connector.join()
def test_insert(self):
"""Tests insert
"""
self.conn['test']['test'].insert({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) > 0)
result_set_1 = list(self.solr_conn.search('paulie'))
self.assertEqual(len(result_set_1), 1)
result_set_2 = self.conn['test']['test'].find_one()
for item in result_set_1:
self.assertEqual(item['_id'], str(result_set_2['_id']))
self.assertEqual(item['name'], result_set_2['name'])
def test_remove(self):
"""Tests remove
"""
self.conn['test']['test'].insert({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
self.conn['test']['test'].remove({'name': 'paulie'})
assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)
def test_insert_file(self):
"""Tests inserting a gridfs file
"""
fs = GridFS(self.conn['test'], 'test')
test_data = "test_insert_file test file"
id = fs.put(test_data, filename="test.txt", encoding='utf8')
assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) > 0)
res = list(self.solr_conn.search('test_insert_file'))
self.assertEqual(len(res), 1)
doc = res[0]
self.assertEqual(doc['filename'], "test.txt")
self.assertEqual(doc['_id'], str(id))
self.assertEqual(doc['content'][0].strip(), test_data.strip())
def test_remove_file(self):
"""Tests removing a gridfs file
"""
fs = GridFS(self.conn['test'], 'test')
id = fs.put("test file", filename="test.txt", encoding='utf8')
assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 1)
fs.delete(id)
assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) == 0)
def test_update(self):
"""Test update operations on Solr.
Need to have the following defined in schema.xml:
<field name="a" type="int" indexed="true" stored="true" />
<field name="b.0.c" type="int" indexed="true" stored="true" />
<field name="b.10.c" type="int" indexed="true" stored="true" />
<field name="b.0.e" type="int" indexed="true" stored="true" />
<field name="b.1.d" type="int" indexed="true" stored="true" />
<field name="b.1.f" type="int" indexed="true" stored="true" />
<field name="b.2.e" type="int" indexed="true" stored="true" />
"""
docman = self.connector.doc_managers[0]
# Insert
self.conn.test.test.insert({"a": 0})
assert_soon(lambda: sum(1 for _ in self._search("*:*")) == 1)
def check_update(update_spec):
updated = self.conn.test.test.find_and_modify(
{"a": 0},
update_spec,
new=True
)
# Stringify _id to match what will be retrieved from Solr
updated['_id'] = str(updated['_id'])
# Flatten the MongoDB document to match Solr
updated = docman._clean_doc(updated, 'dummy.namespace', 0)
# Allow some time for update to propagate
time.sleep(1)
replicated = list(self._search("a:0"))[0]
# Remove add'l fields until these are stored in a separate Solr core
updated.pop('_ts')
replicated.pop('_ts')
updated.pop('ns')
replicated.pop('ns')
# Remove field added by Solr
replicated.pop("_version_")
self.assertEqual(replicated, updated)
# Update by adding a field.
# Note that Solr can't mix types within an array
check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})
# Update by setting an attribute of a sub-document beyond end of array.
check_update({"$set": {"b.10.c": 42}})
# Update by changing a value within a sub-document (contains array)
check_update({"$inc": {"b.0.c": 1}})
# Update by changing the value within an array
check_update({"$inc": {"b.1.f": 12}})
# Update by adding new bucket to list
check_update({"$push": {"b": {"e": 12}}})
# Update by replacing an entire sub-document
check_update({"$set": {"b.0": {"e": 4}}})
# Update by adding a sub-document
check_update({"$set": {"b": {"0": {"c": 100}}}})
# Update whole document
check_update({"a": 0, "b": {"1": {"d": 10000}}})
def test_rollback(self):
"""Tests rollback. We force a rollback by inserting one doc, killing
primary, adding another doc, killing the new primary, and
restarting both the servers.
"""
primary_conn = self.repl_set.primary.client()
self.conn['test']['test'].insert({'name': 'paul'})
assert_soon(
lambda: self.conn.test.test.find({'name': 'paul'}).count() == 1)
assert_soon(
lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 1)
self.repl_set.primary.stop(destroy=False)
new_primary_conn = self.repl_set.secondary.client()
admin_db = new_primary_conn['admin']
while admin_db.command("isMaster")['ismaster'] is False:
time.sleep(1)
time.sleep(5)
retry_until_ok(self.conn.test.test.insert,
{'name': 'pauline'})
assert_soon(lambda: sum(1 for _ in self.solr_conn.search('*:*')) == 2)
result_set_1 = list(self.solr_conn.search('pauline'))
result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
self.assertEqual(len(result_set_1), 1)
for item in result_set_1:
self.assertEqual(item['_id'], str(result_set_2['_id']))
self.repl_set.secondary.stop(destroy=False)
self.repl_set.primary.start()
while primary_conn['admin'].command("isMaster")['ismaster'] is False:
time.sleep(1)
self.repl_set.secondary.start()
time.sleep(2)
result_set_1 = self.solr_conn.search('pauline')
self.assertEqual(sum(1 for _ in result_set_1), 0)
result_set_2 = self.solr_conn.search('paul')
self.assertEqual(sum(1 for _ in result_set_2), 1)
def test_valid_fields(self):
""" Tests documents with field definitions
"""
inserted_obj = self.conn['test']['test'].insert(
{'name': 'test_valid'})
self.conn['test']['test'].update(
{'_id': inserted_obj},
{'$set': {'popularity': 1}}
)
docman = self.connector.doc_managers[0]
assert_soon(lambda: sum(1 for _ in self._search("*:*")) > 0)
result = docman.get_last_doc()
self.assertIn('popularity', result)
self.assertEqual(sum(1 for _ in self._search(
"name=test_valid")), 1)
def test_invalid_fields(self):
""" Tests documents without field definitions
"""
inserted_obj = self.conn['test']['test'].insert(
{'name': 'test_invalid'})
self.conn['test']['test'].update(
{'_id': inserted_obj},
{'$set': {'break_this_test': 1}}
)
docman = self.connector.doc_managers[0]
assert_soon(lambda: sum(1 for _ in self._search("*:*")) > 0)
result = docman.get_last_doc()
self.assertNotIn('break_this_test', result)
self.assertEqual(sum(1 for _ in self._search(
"name=test_invalid")), 1)
def test_dynamic_fields(self):
""" Tests dynamic field definitions
The following fields are supplied in the provided schema.xml:
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="i_*" type="int" indexed="true" stored="true"/>
Cases:
1. Match on first definition
2. Match on second definition
3. No match
"""
self.solr_conn.delete(q='*:*')
match_first = {"_id": 0, "foo_i": 100}
match_second = {"_id": 1, "i_foo": 200}
match_none = {"_id": 2, "foo": 300}
# Connector is already running
self.conn["test"]["test"].insert(match_first)
self.conn["test"]["test"].insert(match_second)
self.conn["test"]["test"].insert(match_none)
# Should have documents in Solr now
assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
"Solr doc manager should allow dynamic fields")
# foo_i and i_foo should be indexed, foo field should not exist
self.assertEqual(sum(1 for _ in self.solr_conn.search("foo_i:100")), 1)
self.assertEqual(sum(1 for _ in self.solr_conn.search("i_foo:200")), 1)
# SolrError: "undefined field foo"
logger = logging.getLogger("pysolr")
logger.error("You should see an ERROR log message from pysolr here. "
"This indicates success, not an error in the test.")
with self.assertRaises(SolrError):
self.solr_conn.search("foo:300")
def test_nested_fields(self):
    """Sub-documents and arrays are flattened into dotted Solr field names.

    Relies on dotted-path <field>/<dynamicField> entries in the provided
    schema.xml (e.g. "billing.address.*" style fields plus "numbers.*" and
    "characters.*" dynamic fields).
    """
    nested_doc = {
        "name": "Jeb",
        "billing": {
            "address": {
                "street": "12345 Mariposa Street",
                "state": "California"
            }
        }
    }
    array_doc = {
        "numbers": ["one", "two", "three"],
        "characters": [
            {"name": "Big Bird",
             "color": "yellow"},
            {"name": "Elmo",
             "color": "red"},
            "Cookie Monster"
        ]
    }
    # Connector is already running, so inserts replicate to Solr.
    self.conn["test"]["test"].insert(nested_doc)
    self.conn["test"]["test"].insert(array_doc)
    assert_soon(lambda: sum(1 for _ in self.solr_conn.search("*:*")) > 0,
                "documents should have been replicated to Solr")
    # First document: dotted path into the nested sub-document.
    results = self.solr_conn.search(
        "billing.address.street:12345\ Mariposa\ Street")
    self.assertEqual(len(results), 1)
    self.assertEqual(next(iter(results))["billing.address.state"],
                     "California")
    # Second document: dotted paths that include array indices.
    results = self.solr_conn.search(
        "characters.1.color:red")
    self.assertEqual(len(results), 1)
    self.assertEqual(next(iter(results))["numbers.2"], "three")
    results = self.solr_conn.search("characters.2:Cookie\ Monster")
    self.assertEqual(len(results), 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 17 13:29:13 2016
@author: Adam Ek
"""
from WSM import DataReader
from WSM import RandomVectorizer
from WSM import TermRelevance
from WSM import Contexter
from WSM import Similarity
from WSM import DataOptions
import sys
def main():
    """Interactive console driver for the WSM random-indexing pipeline.

    Runs two loops:

    1. A setup loop where the user either loads previously saved data
       ("load <name>") or builds a new model from text files
       ("new <path>" followed by "apply", optionally tweaked via "set").
    2. A query loop offering similarity queries ("sim", "top"),
       persistence ("save"), dataset inspection ("info"), "help" and "exit".

    Side effects: reads from stdin, writes to stdout, and may read/write
    model files through DataReader/DataOptions.
    """
    # BUGFIX: typo "Distributial" -> "Distributional".
    print('Welcome to Distributional Semantics with Random Indexing\n')
    new_data = False
    # [context, window] overrides collected via "set".
    # BUGFIX: was initialised as [] so "set context ..."/"set window ..."
    # raised IndexError. NOTE(review): these values are currently never
    # passed on to Contexter -- confirm whether that is intentional.
    settings = [None, None]

    #< init data
    while True:
        if new_data:
            print('Enter new data by typing "new <path>" , "set setting value" to change context settings and finish by typing "apply"\n')
        else:
            print('Enter new data source by typing "new <path>" and load saved data by typing "load <name>"\n')

        setup = input('> ').split()

        if len(setup) == 0:
            print('Please try again')

        #< load previously saved data
        elif setup[0] == 'load':
            if not new_data:
                try:
                    dt = DataOptions()
                    word_vector_vocabulary, documents, data_info = dt.load(setup[1])
                    break
                except Exception as e:
                    print('Try again\n', e)

        #< input a new data source
        elif setup[0] == 'new':
            new_data = True
            # BUGFIX: the user-supplied path(s) were silently ignored and a
            # hard-coded file always used. Honour "new <path> [...]" when
            # given; fall back to the old default otherwise.
            if len(setup) > 1:
                sources = setup[1:]
            else:
                sources = ['/home/usr1/git/dist_data/test_doc_4.txt']
            dr = DataReader()
            sentences, vocabulary, documents = dr.preprocess_data(sources)
            rv = RandomVectorizer()
            vector_vocabulary = rv.vocabulary_vectorizer(vocabulary)

        #< apply processed data
        elif setup[0] == 'apply':
            if new_data:
                wgt = TermRelevance(documents)
                vector_vocabulary = wgt.weight(vector_vocabulary)
                #TODO: !!! handle weight_list
                rc = Contexter(vector_vocabulary)
                word_vector_vocabulary = rc.process_data(sentences)
                dt = DataOptions(word_vector_vocabulary, documents, rc.data_info, wgt.weight_setup)
                break
            else:
                print('Invalid command')

        #< change settings before data is applied with command "apply"
        elif setup[0] == 'set':
            # BUGFIX: guard against "set" with missing arguments (IndexError).
            if len(setup) < 3:
                print('Invalid input')
            elif setup[1] == 'context':
                settings[0] = setup[2]
            elif setup[1] == 'window':
                settings[1] = setup[2]
            else:
                print('Invalid input')

        #< exit
        elif setup[0] == 'exit':
            sys.exit()

        else:
            print('Invalid input')

    #< User interface after data has been loaded
    # BUGFIX: typo "availible" -> "available".
    print('Type "sim <word1> <word2>" for similarity between two words, "top <word>" for top 3 similar words, "help" to display available commands and "exit" to quit\n')
    sim = Similarity(word_vector_vocabulary)

    while True:
        input_args = input('> ').split()

        #< empty input
        if not input_args:
            print('Please try again\n')

        #< RI similarity between words
        elif input_args[0] == 'sim':
            try:
                sim_res = sim.cosine_similarity(input_args[1].lower(), input_args[2].lower())
                # A string result is an error message from Similarity.
                if sim_res == str(sim_res):
                    print(sim_res)
                else:
                    print('Cosine similarity between "{0}" and "{1}" is\n {2}\n'.format(input_args[1], input_args[2], sim_res))
            except Exception as e:
                print('Invalid input for "sim"\n', e)

        #< top similar words
        elif input_args[0] == 'top':
            # BUGFIX: re-enabled the (previously commented-out) handler so a
            # bad argument no longer crashes the console.
            try:
                top_res = sim.top_similarity(input_args[1].lower())
                # A string result is an error message from Similarity.
                if top_res == str(top_res):
                    print(top_res)
                else:
                    print('Top similar words for "{0}" is:'.format(input_args[1]))
                    for i, (dist, word) in enumerate(top_res):
                        print(i + 1, dist, word)
                    print('')
            except Exception as e:
                print('Invalid input for "top"\n', e)

        #< quit
        elif input_args[0] == 'exit':
            break

        #< save data
        elif input_args[0] == 'save':
            # BUGFIX: re-enabled the handler; after "load" the names wgt/rc
            # do not exist and this branch used to crash with NameError.
            try:
                print(dt.save(input_args[1], word_vector_vocabulary, documents, rc.data_info, wgt.weight_setup))
            except Exception as e:
                print('Error\n{0}'.format(e))

        #< info about dataset or word
        elif input_args[0] == 'info':
            # BUGFIX: re-enabled the handler; "-weights" after "load" used to
            # crash with NameError (wgt undefined), as did unknown words.
            try:
                if len(input_args) == 1:
                    documents, data_info = dt.info()
                    print('Data info: {0}'.format(data_info['name']))
                    print('Weighting scheme: {0}'.format(data_info['weights']))
                    print('Context type: {0}'.format(data_info['context']))
                    print('Context window size: {0}\n'.format(data_info['window']))
                    print('Total documents: {0}'.format(len(documents.keys())))
                    print('Unique words: {0}'.format(sum([len(documents[x].keys()) for x in documents])))
                    print('Total words: {0}\n'.format(sum([sum(documents[x].values()) for x in documents])))
                else:
                    if input_args[1] == '-weights':
                        if len(input_args) == 3:
                            print(wgt.word_weights[input_args[2].lower()])
                        else:
                            print(wgt.weight_setup)
                    elif input_args[1] == '-docs':
                        documents = dt.info('-docs')
                        print('Document \t\t Unique \t Total')
                        for doc_info in documents:
                            print('{0} \t {1} \t {2}'.format(doc_info, len(documents[doc_info].keys()), sum(documents[doc_info].values())))
                        print('')
                    else:
                        documents, stemmed_word = dt.info(input_args[1].lower())
                        print('"{0}" stemmed to "{1}"\n'.format(input_args[1].lower(), stemmed_word))
                        total = [0, 0]
                        print('Document \t\t Occurences')
                        #< TODO fix alignment
                        for w in documents:
                            print('{0} \t\t {1}'.format(w, documents[w]))
                            total[0] += documents[w]
                            total[1] += 1
                        print('{0} occurences in {1} documents'.format(total[0], total[1]))
            except Exception as e:
                print(e)
                print('Invalid command')

        #< help information
        elif input_args[0] == 'help':
            print('- Semantic operations')
            print('\t"sim <word> <word>" similarity between two words')
            print('\t"top <word>" top 5 similar words')
            print('- Data operations')
            print('\t"save <name>" save current data')
            print('\t"info" information about the data')
            print('\t"info <word>" information about a word')
            print('- ETC')
            print('\t"exit" to quit\n')

        else:
            print('Unrecognized command')
# Script entry point.
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from datetime import datetime
from pytz import UTC
from bqdm.action.table import TableAction
from bqdm.model.schema import BigQuerySchemaField
from bqdm.model.table import BigQueryTable
class TestTableAction(unittest.TestCase):
    """Unit tests for the static diff helpers on TableAction.

    get_add_tables / get_change_tables / get_destroy_tables each take a
    ``(source, target)`` pair of BigQueryTable sequences -- ``source`` being
    the currently deployed state and ``target`` the desired one -- and return
    a ``(count, tables)`` tuple describing the required actions.
    """

    def test_get_add_tables(self):
        """Tables present only in ``target`` are reported as additions."""
        source_table1_1 = BigQueryTable(
            table_id='test1',
            friendly_name='test_friendly_name',
            description='test_description'
        )
        source_table1_2 = BigQueryTable(
            table_id='test2',
            friendly_name='foo_bar',
            description='fizz_buzz'
        )
        target_table1_1 = BigQueryTable(
            table_id='test1',
            friendly_name='test_friendly_name',
            description='test_description'
        )
        target_table1_2 = BigQueryTable(
            table_id='test2',
            friendly_name='foo_bar',
            description='fizz_buzz'
        )

        # Target has one extra table -> one addition.
        source1 = [source_table1_1]
        target1 = [target_table1_1, target_table1_2]
        actual_count1, actual_results1 = TableAction.get_add_tables(source1, target1)
        self.assertEqual(actual_count1, 1)
        self.assertEqual(actual_results1, (target_table1_2, ))

        # Identical sets -> nothing to add.
        source2 = [source_table1_1, source_table1_2]
        target2 = [target_table1_1, target_table1_2]
        actual_count2, actual_results2 = TableAction.get_add_tables(source2, target2)
        self.assertEqual(actual_count2, 0)
        self.assertEqual(actual_results2, ())

        # Source has extra tables -> still nothing to add.
        source3 = [source_table1_1, source_table1_2]
        target3 = [target_table1_1]
        actual_count3, actual_results3 = TableAction.get_add_tables(source3, target3)
        self.assertEqual(actual_count3, 0)
        self.assertEqual(actual_results3, ())

    def test_get_change_tables(self):
        """Tables whose attributes differ between source and target are
        reported as changes, regardless of list ordering."""
        schema_field1 = BigQuerySchemaField(
            name='test',
            field_type='INTEGER',
            mode='NULLABLE',
            description='test_description'
        )
        schema_field2 = BigQuerySchemaField(
            name='test',
            field_type='STRING',
            mode='REQUIRED',
            description='foo_bar'
        )
        label1 = {
            'foo': 'bar'
        }
        label2 = {
            'fizz': 'buzz'
        }
        # Five source tables, each differing from its target counterpart in a
        # different attribute: metadata, expiry/partitioning, view settings,
        # schema, and labels.
        source_table1_1 = BigQueryTable(
            table_id='test1',
            friendly_name='test_friendly_name',
            description='test_description'
        )
        source_table1_2 = BigQueryTable(
            table_id='test2',
            friendly_name='test_friendly_name',
            description='test_description',
            expires=datetime(2018, 1, 1, 0, 0, 0, tzinfo=UTC),
            partitioning_type='DAY'
        )
        source_table1_3 = BigQueryTable(
            table_id='test3',
            friendly_name='test_friendly_name',
            description='test_description',
            view_use_legacy_sql=False,
            view_query='SELECT * FROM bigquery_datasetmanager.test.test'
        )
        source_table1_4 = BigQueryTable(
            table_id='test4',
            friendly_name='test_friendly_name',
            description='test_description',
            schema=(schema_field1, )
        )
        source_table1_5 = BigQueryTable(
            table_id='test5',
            friendly_name='test_friendly_name',
            description='test_description',
            labels=label1
        )
        target_table1_1 = BigQueryTable(
            table_id='test1',
            friendly_name='foo_bar',
            description='fizz_buzz'
        )
        target_table1_2 = BigQueryTable(
            table_id='test2',
            friendly_name='test_friendly_name',
            description='test_description',
            expires=datetime(2019, 1, 1, 0, 0, 0, tzinfo=UTC)
        )
        target_table1_3 = BigQueryTable(
            table_id='test3',
            friendly_name='test_friendly_name',
            description='test_description',
            view_use_legacy_sql=True,
            view_query='SELECT * FROM bigquery_datasetmanager.foo.bar'
        )
        target_table1_4 = BigQueryTable(
            table_id='test4',
            friendly_name='test_friendly_name',
            description='test_description',
            schema=(schema_field2, )
        )
        target_table1_5 = BigQueryTable(
            table_id='test5',
            friendly_name='test_friendly_name',
            description='test_description',
            labels=label2
        )

        # All five differ -> all five reported.
        source1 = [source_table1_1, source_table1_2, source_table1_3,
                   source_table1_4, source_table1_5]
        target1 = [target_table1_1, target_table1_2, target_table1_3,
                   target_table1_4, target_table1_5]
        actual_count1, actual_results1 = TableAction.get_change_tables(source1, target1)
        self.assertEqual(actual_count1, 5)
        self.assertEqual(set(actual_results1),
                         {target_table1_1, target_table1_2, target_table1_3,
                          target_table1_4, target_table1_5})

        # Same comparison with source order reversed -> same result.
        source2 = [source_table1_5, source_table1_4, source_table1_3,
                   source_table1_2, source_table1_1]
        target2 = [target_table1_1, target_table1_2, target_table1_3,
                   target_table1_4, target_table1_5]
        actual_count2, actual_results2 = TableAction.get_change_tables(source2, target2)
        self.assertEqual(actual_count2, 5)
        self.assertEqual(set(actual_results2),
                         {target_table1_1, target_table1_2, target_table1_3,
                          target_table1_4, target_table1_5})

        # Identical source/target -> no changes.
        source3 = [source_table1_1, source_table1_2, source_table1_3,
                   source_table1_4, source_table1_5]
        target3 = [source_table1_1, source_table1_2, source_table1_3,
                   source_table1_4, source_table1_5]
        actual_count3, actual_results3 = TableAction.get_change_tables(source3, target3)
        self.assertEqual(actual_count3, 0)
        self.assertEqual(actual_results3, ())

        # Identical content, reversed order -> still no changes.
        source4 = [source_table1_1, source_table1_2, source_table1_3,
                   source_table1_4, source_table1_5]
        target4 = [source_table1_5, source_table1_4, source_table1_3,
                   source_table1_2, source_table1_1]
        actual_count4, actual_results4 = TableAction.get_change_tables(source4, target4)
        self.assertEqual(actual_count4, 0)
        self.assertEqual(actual_results4, ())

        # Target tables on both sides -> no changes.
        source5 = [target_table1_1, target_table1_2, target_table1_3,
                   target_table1_4, target_table1_5]
        target5 = [target_table1_1, target_table1_2, target_table1_3,
                   target_table1_4, target_table1_5]
        actual_count5, actual_results5 = TableAction.get_change_tables(source5, target5)
        self.assertEqual(actual_count5, 0)
        self.assertEqual(actual_results5, ())

        # Target tables on both sides, reversed -> still no changes.
        source6 = [target_table1_1, target_table1_2, target_table1_3,
                   target_table1_4, target_table1_5]
        target6 = [target_table1_5, target_table1_4, target_table1_3,
                   target_table1_2, target_table1_1]
        actual_count6, actual_results6 = TableAction.get_change_tables(source6, target6)
        self.assertEqual(actual_count6, 0)
        self.assertEqual(actual_results6, ())

        # Only the tables present in target can be reported as changed.
        source7 = [source_table1_1, source_table1_2, source_table1_3,
                   source_table1_4, source_table1_5]
        target7 = [target_table1_1, target_table1_2]
        actual_count7, actual_results7 = TableAction.get_change_tables(source7, target7)
        self.assertEqual(actual_count7, 2)
        self.assertEqual(set(actual_results7), {target_table1_1, target_table1_2})

        source8 = [source_table1_1, source_table1_2, source_table1_3,
                   source_table1_4, source_table1_5]
        target8 = [target_table1_3, target_table1_4, target_table1_5]
        actual_count8, actual_results8 = TableAction.get_change_tables(source8, target8)
        self.assertEqual(actual_count8, 3)
        self.assertEqual(set(actual_results8),
                         {target_table1_3, target_table1_4, target_table1_5})

        # Tables missing from source are additions, not changes.
        source9 = [source_table1_1, source_table1_2]
        target9 = [target_table1_1, target_table1_2, target_table1_3,
                   target_table1_4, target_table1_5]
        actual_count9, actual_results9 = TableAction.get_change_tables(source9, target9)
        self.assertEqual(actual_count9, 2)
        self.assertEqual(set(actual_results9), {target_table1_1, target_table1_2})

        source10 = [source_table1_3, source_table1_4, source_table1_5]
        target10 = [target_table1_1, target_table1_2, target_table1_3,
                    target_table1_4, target_table1_5]
        actual_count10, actual_results10 = TableAction.get_change_tables(source10, target10)
        self.assertEqual(actual_count10, 3)
        self.assertEqual(set(actual_results10),
                         {target_table1_3, target_table1_4, target_table1_5})

    def test_get_destroy_tables(self):
        """Tables present only in ``source`` are reported for destruction."""
        source_table1_1 = BigQueryTable(
            table_id='test1',
            friendly_name='test_friendly_name',
            description='test_description'
        )
        source_table1_2 = BigQueryTable(
            table_id='test2',
            friendly_name='foo_bar',
            description='fizz_buzz'
        )
        target_table1_1 = BigQueryTable(
            table_id='test1',
            friendly_name='test_friendly_name',
            description='test_description'
        )
        target_table1_2 = BigQueryTable(
            table_id='test2',
            friendly_name='foo_bar',
            description='fizz_buzz'
        )

        # Target has extra tables -> nothing to destroy.
        source1 = [source_table1_1]
        target1 = [target_table1_1, target_table1_2]
        actual_count1, actual_results1 = TableAction.get_destroy_tables(source1, target1)
        self.assertEqual(actual_count1, 0)
        self.assertEqual(actual_results1, ())

        # Identical sets -> nothing to destroy.
        source2 = [source_table1_1, source_table1_2]
        target2 = [target_table1_1, target_table1_2]
        actual_count2, actual_results2 = TableAction.get_destroy_tables(source2, target2)
        self.assertEqual(actual_count2, 0)
        self.assertEqual(actual_results2, ())

        # One source table missing from target -> one destruction.
        source3 = [source_table1_1, source_table1_2]
        target3 = [target_table1_1]
        actual_count3, actual_results3 = TableAction.get_destroy_tables(source3, target3)
        self.assertEqual(actual_count3, 1)
        self.assertEqual(actual_results3, (source_table1_2, ))

        # Empty target -> everything in source is destroyed.
        source4 = [source_table1_1, source_table1_2]
        target4 = []
        actual_count4, actual_results4 = TableAction.get_destroy_tables(source4, target4)
        self.assertEqual(actual_count4, 2)
        self.assertEqual(set(actual_results4), {source_table1_1, source_table1_2, })

        # Empty source -> nothing to destroy.
        source5 = []
        target5 = [target_table1_1, target_table1_2]
        actual_count5, actual_results5 = TableAction.get_destroy_tables(source5, target5)
        self.assertEqual(actual_count5, 0)
        self.assertEqual(actual_results5, ())

    def test_build_query_field(self):
        """build_query_field emits cast()/struct() SQL converting each
        source field to its target type, recursing into RECORD fields."""
        source_schema_field1 = BigQuerySchemaField(
            name='test1',
            field_type='STRING',
            mode='NULLABLE',
            description='test_description'
        )
        source_schema_field2 = BigQuerySchemaField(
            name='test2',
            field_type='INTEGER',
            mode='NULLABLE',
            description='test_description'
        )
        source_schema_field3 = BigQuerySchemaField(
            name='test3',
            field_type='RECORD',
            mode='NULLABLE',
            description='test_description',
            fields=(
                BigQuerySchemaField(
                    name='foo_bar',
                    field_type='STRING',
                    mode='NULLABLE',
                    description='test_description'
                ),
            )
        )
        source_schema_field4 = BigQuerySchemaField(
            name='test4',
            field_type='RECORD',
            mode='NULLABLE',
            description='test_description',
            fields=(
                BigQuerySchemaField(
                    name='fizz',
                    field_type='INTEGER',
                    mode='NULLABLE',
                    description='test_description'
                ),
                BigQuerySchemaField(
                    name='buzz',
                    field_type='BOOL',
                    mode='NULLABLE',
                    description='test_description'
                ),
            )
        )
        target_schema_field1 = BigQuerySchemaField(
            name='test1',
            field_type='INTEGER',
            mode='NULLABLE',
            description='test_description'
        )
        target_schema_field2 = BigQuerySchemaField(
            name='test2',
            field_type='STRING',
            mode='NULLABLE',
            description='test_description'
        )
        target_schema_field3 = BigQuerySchemaField(
            name='test3',
            field_type='RECORD',
            mode='NULLABLE',
            description='test_description',
            fields=(
                BigQuerySchemaField(
                    name='foo_bar',
                    field_type='INTEGER',
                    mode='NULLABLE',
                    description='test_description'
                ),
            )
        )
        target_schema_field4 = BigQuerySchemaField(
            name='test4',
            field_type='RECORD',
            mode='NULLABLE',
            description='test_description',
            fields=(
                BigQuerySchemaField(
                    name='fizz',
                    field_type='FLOAT',
                    mode='NULLABLE',
                    description='test_description'
                ),
                BigQuerySchemaField(
                    name='buzz',
                    field_type='STRING',
                    mode='NULLABLE',
                    description='test_description'
                ),
            )
        )
        source1 = (source_schema_field1, source_schema_field2,
                   source_schema_field3, source_schema_field4)
        target1 = (target_schema_field1, target_schema_field2,
                   target_schema_field3, target_schema_field4)
        # Note: legacy types are rendered as standard-SQL type names
        # (INTEGER -> INT64, FLOAT -> FLOAT64).
        expected_query_field = 'cast(test1 AS INT64) AS test1, ' \
                               'cast(test2 AS STRING) AS test2, ' \
                               'struct(cast(test3.foo_bar AS INT64) AS foo_bar) AS test3, ' \
                               'struct(cast(test4.fizz AS FLOAT64) AS fizz, ' \
                               'cast(test4.buzz AS STRING) AS buzz) AS test4'
        actual_query_field = TableAction.build_query_field(source1, target1)
        self.assertEqual(expected_query_field, actual_query_field)
# TODO
# test_plan_add
# test_add
# test_plan_change
# test_change
# test_plan_destroy
# test_destroy
# TODO
# test_backup
# test_select_insert
# test_create_temporary_table
# TODO
# test_list_tables
# test_export
|
|
import os
from .intstream import from_bytes
from .Curve import Curve
from .Point import Point
from .rfc6979 import deterministic_generate_k
class Generator(Curve, Point):
    """
    A Generator is a specific point on an elliptic curve that defines a `trapdoor
    function <https://en.wikipedia.org/wiki/Trapdoor_function>`_ from integers to curve points.

    :param p: the prime for the :class:`Curve <pycoin.ecdsa.Curve.Curve>`
    :param a: the a value for the :class:`Curve <pycoin.ecdsa.Curve.Curve>`
    :param b: the b value for the :class:`Curve <pycoin.ecdsa.Curve.Curve>`
    :param basis: a :class:`Point <pycoin.ecdsa.Point.Point>` on the
        given :class:`Curve <pycoin.ecdsa.Curve.Curve>`
    :param order: the order for the :class:`Curve <pycoin.ecdsa.Curve.Curve>`

    The constructor raises :class:`NoSuchPointError` if the point is invalid.
    The point at infinity is ``(x, y) == (None, None)``.
    """

    def __new__(self, p, a, b, basis, order):
        # since Generator extends tuple (via Point), we need to override __new__
        return tuple.__new__(self, basis)

    def __init__(self, p, a, b, basis, order, entropy_f=os.urandom):
        """
        Set up a group with generator basis for the curve y^2 = x^3 + x*a + b (mod p).
        The order is the order of the group (it's generally predetermined for a given curve;
        how it's calculated is complicated).
        The entropy function creates a blinding factor, to mitigate side channel attacks.
        """
        Curve.__init__(self, p, a, b, order)
        Point.__init__(self, basis[0], basis[1], self)
        # Precompute basis * 2**k for k in 0..255; consumed by raw_mul().
        self._powers = []
        Gp = self
        for _ in range(256):
            self._powers.append(Gp)
            Gp += Gp
        # modular_sqrt() below uses the (p+1)//4 exponent trick, which is
        # only valid for primes congruent to 3 mod 4.
        assert p % 4 == 3, "p % 4 must be 3 due to modular_sqrt optimization"
        self._mod_sqrt_power = (p + 1) // 4
        # Random blinding factor: __mul__ computes (e + b)*G - b*G instead of
        # e*G so the scalar actually multiplied is masked.
        self._blinding_factor = from_bytes(entropy_f(32)) % self._order
        self._minus_blinding_factor_g = self.raw_mul(-self._blinding_factor)

    def modular_sqrt(self, a):
        """
        :return: n where ``n * n == a (mod p) for the curve's prime p``.
        If no such n exists, an arbitrary value will be returned.
        """
        return pow(a, self._mod_sqrt_power, self._p)

    def inverse(self, a):
        # NOTE: despite the historical docstring, the inverse is taken modulo
        # the group *order*, not the field prime (see self._order below).
        ":return: n where ``a * n == 1 (mod o)`` for the generator's order o."
        return self.inverse_mod(a, self._order)

    def points_for_x(self, x):
        """
        :param: x: an integer x coordinate
        :return: (p0, p1) where each p is a :class:`Point` with given x coordinate,
            and p0's y value is even.

        To get a point with particular parity, use::

            points_for_x(x)[1 if is_y_supposed_to_be_odd else 0]
        """
        p = self._p
        # Right-hand side of the curve equation y^2 = x^3 + a*x + b (mod p).
        alpha = (pow(x, 3, p) + self._a * x + self._b) % p
        y0 = self.modular_sqrt(alpha)
        if y0 == 0:
            raise ValueError("no y value for %d" % x)
        # The two candidate points share x; their y values are y0 and p - y0.
        p0, p1 = [self.Point(x, _) for _ in (y0, p - y0)]
        if y0 & 1 == 0:
            return (p0, p1)
        return (p1, p0)

    def possible_public_pairs_for_signature(self, value, signature, y_parity=None):
        """
        :param: value: an integer value
        :param: signature: an ``(r, s)`` pair of integers representing an ecdsa signature of ``value``
        :param: y_parity: (optional) for a given value and signature, there are either two points
            that sign it, or none if the signature is invalid. One of the points has an even y
            value, the other an odd. If this parameter is set, only points whose y value matches
            this value will be returned in the list.
        :return: a list of :class:`Point <pycoin.ecdsa.Point.Point>` objects p where each p is
            a possible public key for which ``signature`` correctly signs the given ``value``.
            If something goes wrong, this list will be empty.
        """
        r, s = signature
        try:
            # Candidate curve points whose x coordinate equals r.
            points = self.points_for_x(r)
        except ValueError:
            return []
        if y_parity is not None:
            # points_for_x returns (even-y point, odd-y point).
            if y_parity & 1:
                points = points[1:]
            else:
                points = points[:1]
        # Public key recovery: Q = (s*R - value*G) / r for each candidate R.
        inv_r = self.inverse(r)
        s_over_r = s * inv_r
        minus_E_over_r = -(inv_r * value) * self
        try:
            return [s_over_r * p + minus_E_over_r for p in points]
        except ValueError:
            return []

    def raw_mul(self, e):
        """
        :param: e: an integer value
        :returns: e * self

        This method uses a precomputed table as an optimization.
        """
        e %= self._order
        P = self._infinity
        # Double-and-add over the precomputed powers table, one bit at a time.
        for bit in range(256):
            # add the power of the generator every time to make it more time-deterministic
            a = [P, P + self._powers[bit]]
            # choose the correct result
            P = a[e & 1]
            e >>= 1
        return P

    def __mul__(self, e):
        """Multiply the generator by an integer. Uses the blinding factor."""
        # (e + b)*G - b*G == e*G, with the blinding factor b masking e.
        return self.raw_mul(e + self._blinding_factor) + self._minus_blinding_factor_g

    def __rmul__(self, e):
        """Multiply the generator by an integer."""
        return self.__mul__(e)

    def verify(self, public_pair, val, sig):
        """
        :param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve
        :param: val: an integer value
        :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature
        :returns: True if and only if the signature ``sig`` is a valid signature
            of ``val`` using ``public_pair`` public key.
        """
        order = self._order
        if val == 0:
            return False
        r, s = sig
        # Reject out-of-range signature components.
        if r < 1 or r >= order or s < 1 or s >= order:
            return False
        # Standard ECDSA verification: check x(u1*G + u2*Q) == r (mod order).
        s_inverse = self.inverse(s)
        u1 = val * s_inverse
        u2 = r * s_inverse
        point = u1 * self + u2 * self.Point(*public_pair)
        v = point[0] % order
        return v == r

    def sign_with_recid(self, secret_exponent, val, gen_k=None):
        """
        :param: secret_exponent: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve
        :param: val: an integer value
        :param: gen_k: a function generating __k values__
        :returns: a tuple of integers ``(r, s, recid)`` where ``(r, s)`` represents an ecdsa
            signature of ``val`` with public key ``self * secret_exponent``; and ``recid``
            is the **recovery id**, a number from 0-3 used to eliminate ambiguity about
            which public key signed the value.

        If gen_k is set, it will be called with (n, secret_exponent, val), and an unguessable
        K value should be returned. Otherwise, the default K value, generated according
        to rfc6979 will be used.
        """
        if val == 0:
            raise ValueError()
        if gen_k is None:
            gen_k = deterministic_generate_k
        n = self._order
        k = gen_k(n, secret_exponent, val)
        while True:
            p1 = k * self
            r = p1[0] % n
            s = (self.inverse(k) * (val + (secret_exponent * r) % n)) % n
            # r == 0 or s == 0 is invalid; retry with the next k.
            if r != 0 and s != 0:
                # recid bit 0: parity of y; bit 1: whether x overflowed n.
                recid = p1[1] & 1
                if p1[0] > n:
                    recid += 2
                return r, s, recid
            k += 1

    def sign(self, secret_exponent, val, gen_k=None):
        """
        :param: secret_exponent: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve
        :param: val: an integer value
        :param: gen_k: a function generating __k values__
        :returns: a pair of integers ``(r, s)`` represents an ecdsa signature of ``val``
            with public key ``self * secret_exponent``.

        If gen_k is set, it will be called with (n, secret_exponent, val), and an unguessable
        K value should be returned. Otherwise, the default K value, generated according
        to rfc6979 will be used.
        """
        # Same as sign_with_recid, discarding the recovery id.
        return self.sign_with_recid(secret_exponent, val, gen_k)[0:2]
|
|
#!/usr/bin/env python
# Copyright 2017 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------
# Updates ESX with vSphere Docker Volume Service 0.11 (and earlier)
# to 0.11.1 and further
# -------------------------------------------------------------------------------------------
import os
import os.path
import sqlite3
import sys
import shutil
import vmdk_ops
# vmdkops python utils are in PY_LOC, so add to path.
sys.path.insert(0, vmdk_ops.PY_LOC)
import vmdk_utils
import auth
import auth_data
# Hard coded (in auth package) UUID for default tenant.
STATIC_UUID = auth.DEFAULT_TENANT_UUID
STATIC_NAME = auth.DEFAULT_TENANT
# CLI return codes
OK = 0
ERROR = 1
# do we need to stop and restart the vmdkops service while patching
STOP_SERVICE = True  # 'False' is for debug only - makes it faster
def patch_a_store(ds_path, old_uuid):
    """Renames and moves stuff as needed in a single DS/dockvols.

    Moves the content of <ds_path>/<old_uuid> into <ds_path>/<STATIC_UUID>
    (a plain rename when the target does not exist yet) and re-points the
    STATIC_NAME symlink at the new directory.

    :param ds_path: path to a datastore's dockvols directory
    :param old_uuid: pre-upgrade UUID of the _DEFAULT tenant
    """
    print("Working on Datastore '{0}'".format(ds_path))
    # move stuff from old_uuid to new_uuid ()
    old_dir = os.path.join(ds_path, old_uuid)
    new_dir = os.path.join(ds_path, STATIC_UUID)
    symlink_name = os.path.join(ds_path, STATIC_NAME)
    if not os.path.isdir(old_dir):
        print(" Skipping {0} - not found".format(old_dir))
        return
    if os.path.exists(new_dir):
        # target exists, move files one by one and remove the old dir
        print(" Moving from {0}, to {1}".format(old_dir, new_dir))
        for f in os.listdir(old_dir):
            src = os.path.join(old_dir, f)
            dst = os.path.join(new_dir, f)
            if os.path.isfile(dst):
                print(" File {0} already exists, skipping the move".format(dst))
                continue
            shutil.move(src, dst)
        if not os.listdir(old_dir):
            print(" Deleting empty {0}".format(old_dir))
            os.rmdir(old_dir)
        else:
            # BUGFIX: the .format() argument was missing, so this line
            # literally printed "{0}".
            print(" *** Warning: {0} is not empty after migration. "
                  "Please check the content.".format(old_dir))
    else:
        print(" Renaming {0} to {1}".format(old_dir, new_dir))
        os.rename(old_dir, new_dir)
    # BUGFIX: message typo "pont" -> "point".
    print(" Adjusting {0} symlink to point to {1}".format(symlink_name, STATIC_UUID))
    try:
        os.remove(symlink_name)
    except OSError:
        # The symlink may legitimately not exist yet - narrow except
        # (was a bare "except:") so real bugs are not swallowed.
        pass
    os.symlink(STATIC_UUID, symlink_name)
def main():
    """
    This code updates ESX with vSphere Docker Volume Service 0.11 (and earlier)
    to 0.11.1 and further, by moving _DEFAULT tenant ID to well known and static UUID,
    and then correcting directories layout and auth_db tables to comply with new UUID.

    Specifically, it does the following:
    - Checks if AUTH_DB exists.
        If it does not, exit with a message - it means nothing to patch on this ESX
    - Gets uuid (aka "old_uuid") for _DEFAULT tenant from DB.
        If it is already STATIC_UUID, exit with a message - nothing to patch
    - Stops the service
    - backs up the DB
    - scans through all <datastore>/volumes/dockvols and
        - mkdir STATIC_UUID, if it does not exist
        - move all from old_uuid to STATIC_UUID
        - symlinks "_DEFAULT" to STATIC_UUID
    In a single DB transaction
        - replaces old_uuid with STATIC_UUID in tenant_id field for all tables:
        (privileges, vms, tenants, volumes)
    starts the service, and if all good removes backup DB

    NOTE: this does not delete any data, so the Docker volumes will stay around
    no matter if the code succeeds or fails
    """
    dbfile = auth_data.AUTH_DB_PATH
    # STEP: check DB presence and fetch old_uuid
    if not os.path.isfile(dbfile):
        print("Config DB", dbfile, "is not found, nothing to update - exiting.")
        sys.exit(0)
    cursor = sqlite3.connect(dbfile).cursor()
    # NOTE(review): the query is built with str.format rather than SQL
    # parameters; STATIC_NAME is our own constant, but confirm it can never
    # contain quote characters.
    cursor.execute("select * from tenants where name='{0}'".format(STATIC_NAME))
    try:
        tenant_id, tenant_name, tenant_desr, tenant_def_ds = cursor.fetchone()
    except TypeError:
        # fetchone() returned None: no _DEFAULT tenant row in the DB.
        print("Can't find '{0}' tenant, exiting".format(STATIC_NAME))
        sys.exit(ERROR)
    print("Found default tenant: {0} {1} {2} {3}".format(tenant_id,
                                                         tenant_name, tenant_desr, tenant_def_ds))
    old_uuid = tenant_id
    if old_uuid == STATIC_UUID:
        # Idempotency guard: a second run finds the static UUID already set.
        print("*** DB seems to have been already migrated, exiting ***")
        sys.exit(OK)

    # STEP: Stop the service and back up the DB
    backup = dbfile + ".bck"
    if os.path.isfile(backup):
        # An existing backup likely means a prior (possibly failed) run;
        # keep it rather than overwrite.
        print("Backup file '{0}' already exists - skipping DB backup".format(backup))
    else:
        print("Backing up Config DB to '{0}'".format(backup))
        shutil.copy(dbfile, backup)
    if STOP_SERVICE:
        print("Stopping vmdk-opsd service")
        os.system("/etc/init.d/vmdk-opsd stop")

    # STEP : patch a datastore - convert dir names to new UUID if needed and move files
    print("Starting conversion of _DEFAULT tenant directory names. old_uid is {0}".format(old_uuid))
    stores = vmdk_utils.get_datastores()
    if not stores:
        print("Docker volume storage is not initialized - skipping directories patching")
    else:
        for datastore in stores:
            # datastore is a tuple; index 2 is the dockvols path.
            ds_path = datastore[2]
            patch_a_store(ds_path, old_uuid)

    # STEP: patch database
    print("Working on DB patch...")
    # sql for update the DB
    # note that:
    #   {0} is old_uuid (default tenant uuid pre-upgrade)
    #   {1} is new_uuid (default tenant uuid post-upgrade)
    #   {2} is tmp name - we need it to comply with DB constraints
    #   {3} is default tenant description (from DB)
    #   {4} is default DB for default tenant (from DB)
    #   {5} is the name ("_DEFAULT") for default tenant
    # TBD - use named params in formatting
    sql_query_template = \
        """
        -- insert temp record to make foreign key happy
        INSERT INTO tenants VALUES ( '{1}', '{2}', '{3}', '{4}' ) ;
        -- update the tables
        UPDATE vms SET tenant_id = '{1}' WHERE tenant_id = '{0}';
        UPDATE volumes SET tenant_id = '{1}' WHERE tenant_id = '{0}';
        UPDATE privileges SET tenant_id = '{1}' WHERE tenant_id = '{0}';
        -- recover _DEFAULT tenant record
        DELETE FROM tenants WHERE id = '{0}';
        UPDATE tenants SET name = '{5}' WHERE name = '{2}';
        UPDATE versions SET major_ver=1, minor_ver=1;
        """
    tmp_tenant_name = "__tmp_name_upgrade_0_11"
    sql_query = sql_query_template.format(old_uuid, STATIC_UUID, tmp_tenant_name,
                                          tenant_desr, tenant_def_ds,
                                          STATIC_NAME)
    # NOTE(review): relies on executescript()'s implicit commit semantics;
    # the connection is never explicitly committed or closed - confirm the
    # changes persist on all supported sqlite3 versions.
    cursor.executescript(sql_query)

    # STEP: restart the service
    if STOP_SERVICE:
        print("Starting vmdk-opsd service")
        os.system("/etc/init.d/vmdk-opsd start")
    # TBD: remove backup ?
    print ("*** ALL DONE ***")
# Script entry point.
if __name__ == "__main__":
    main()
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import os
from contextlib import contextmanager
from hashlib import md5
import time
import pickle
import mock
import six
from six.moves import urllib
from swift.common import direct_client
from swift.common.direct_client import DirectClientException
from swift.common.exceptions import ClientException
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import Timestamp, quote
from swift.common.swob import RESPONSE_REASONS
from swift.common.storage_policy import POLICIES
from six.moves.http_client import HTTPException
from test.unit import patch_policies, debug_logger
class FakeConn(object):
    """Stand-in for the connection object returned by
    ``swift.common.bufferedhttp.http_connect_raw``.

    Records the arguments of the raw connect call (host, port, method,
    path, request headers, query string) and plays back a canned
    status / headers / body, so ``direct_client`` functions can be
    exercised without any real network I/O.
    """

    def __init__(self, status, headers=None, body='', **kwargs):
        # ``status`` may also be an Exception instance; getresponse()
        # will raise it in that case.
        self.status = status
        try:
            self.reason = RESPONSE_REASONS[self.status][0]
        except Exception:
            # unknown status code, or status is an Exception
            self.reason = 'Fake'
        self.body = body
        self.resp_headers = HeaderKeyDict()
        if headers:
            self.resp_headers.update(headers)
        self.etag = None

    def _update_raw_call_args(self, *args, **kwargs):
        """Capture the positional args of http_connect_raw for later
        assertions and return self (so this doubles as the mock's
        replacement for the connect function)."""
        capture_attrs = ('host', 'port', 'method', 'path', 'req_headers',
                         'query_string')
        for attr, value in zip(capture_attrs, args[:len(capture_attrs)]):
            setattr(self, attr, value)
        return self

    def getresponse(self):
        if self.etag:
            # reflect the md5 of whatever was sent back as the etag
            self.resp_headers['etag'] = str(self.etag.hexdigest())
        if isinstance(self.status, Exception):
            raise self.status
        return self

    def getheader(self, header, default=None):
        return self.resp_headers.get(header, default)

    def getheaders(self):
        return self.resp_headers.items()

    def read(self, amt=None):
        if isinstance(self.body, six.StringIO):
            return self.body.read(amt)
        elif amt is None:
            return self.body
        else:
            # BUG FIX: this used to *return* the Exception instance
            # instead of raising it, so a caller doing a sized read on a
            # non-StringIO body silently received an Exception object as
            # if it were body data.
            raise Exception('Not a StringIO entry')

    def send(self, data):
        # accumulate an md5 of everything sent so PUT tests can verify
        # the uploaded bytes through the response etag
        if not self.etag:
            self.etag = md5()
        self.etag.update(data)
@contextmanager
def mocked_http_conn(*args, **kwargs):
    """Context manager that patches bufferedhttp.http_connect_raw with a
    FakeConn built from ``*args``/``**kwargs`` and yields that FakeConn
    so the test can assert on the captured request afterwards."""
    fake_conn = FakeConn(*args, **kwargs)

    def _fake_connect(*conn_args, **conn_kwargs):
        # record the raw connect arguments on the fake and hand it back
        return fake_conn._update_raw_call_args(*conn_args, **conn_kwargs)

    with mock.patch('swift.common.bufferedhttp.http_connect_raw',
                    new=_fake_connect):
        yield fake_conn
@patch_policies
class TestDirectClient(unittest.TestCase):
    """Unit tests for ``swift.common.direct_client``.

    Every test routes HTTP through ``mocked_http_conn`` (which patches
    ``bufferedhttp.http_connect_raw`` with a FakeConn), then asserts on
    the captured request (method, path, headers, query string) and the
    value returned / exception raised for the canned response.
    """

    def setUp(self):
        self.node = {'ip': '1.2.3.4', 'port': '6200', 'device': 'sda',
                     'replication_ip': '1.2.3.5', 'replication_port': '7000'}
        self.part = '0'
        # non-ASCII account/container/object names exercise UTF-8 path
        # quoting in direct_client
        self.account = u'\u062a account'
        self.container = u'\u062a container'
        self.obj = u'\u062a obj/name'
        self.account_path = '/sda/0/%s' % urllib.parse.quote(
            self.account.encode('utf-8'))
        self.container_path = '/sda/0/%s/%s' % tuple(
            urllib.parse.quote(p.encode('utf-8')) for p in (
                self.account, self.container))
        self.obj_path = '/sda/0/%s/%s/%s' % tuple(
            urllib.parse.quote(p.encode('utf-8')) for p in (
                self.account, self.container, self.obj))
        self.user_agent = 'direct-client %s' % os.getpid()

    def test_gen_headers(self):
        stub_user_agent = 'direct-client %s' % os.getpid()

        # default: only the user-agent header
        headers = direct_client.gen_headers()
        self.assertEqual(headers['user-agent'], stub_user_agent)
        self.assertEqual(1, len(headers))

        # add_ts=True adds a current x-timestamp in internal format
        now = time.time()
        headers = direct_client.gen_headers(add_ts=True)
        self.assertEqual(headers['user-agent'], stub_user_agent)
        self.assertTrue(now - 1 < Timestamp(headers['x-timestamp']) < now + 1)
        self.assertEqual(headers['x-timestamp'],
                         Timestamp(headers['x-timestamp']).internal)
        self.assertEqual(2, len(headers))

        # caller-supplied headers are merged in
        headers = direct_client.gen_headers(hdrs_in={'foo-bar': '47'})
        self.assertEqual(headers['user-agent'], stub_user_agent)
        self.assertEqual(headers['foo-bar'], '47')
        self.assertEqual(2, len(headers))

        # ...but a caller-supplied user-agent is overridden
        headers = direct_client.gen_headers(hdrs_in={'user-agent': '47'})
        self.assertEqual(headers['user-agent'], stub_user_agent)
        self.assertEqual(1, len(headers))

        # storage policy index header passes through for every policy
        for policy in POLICIES:
            for add_ts in (True, False):
                now = time.time()
                headers = direct_client.gen_headers(
                    {'X-Backend-Storage-Policy-Index': policy.idx},
                    add_ts=add_ts)
                self.assertEqual(headers['user-agent'], stub_user_agent)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 str(policy.idx))
                expected_header_count = 2
                if add_ts:
                    expected_header_count += 1
                    self.assertEqual(
                        headers['x-timestamp'],
                        Timestamp(headers['x-timestamp']).internal)
                    self.assertTrue(
                        now - 1 < Timestamp(headers['x-timestamp']) < now + 1)
                self.assertEqual(expected_header_count, len(headers))

    def test_direct_get_account(self):
        stub_headers = HeaderKeyDict({
            'X-Account-Container-Count': '1',
            'X-Account-Object-Count': '1',
            'X-Account-Bytes-Used': '1',
            'X-Timestamp': '1234567890',
            'X-PUT-Timestamp': '1234567890'})

        body = '[{"count": 1, "bytes": 20971520, "name": "c1"}]'

        with mocked_http_conn(200, stub_headers, body) as conn:
            resp_headers, resp = direct_client.direct_get_account(
                self.node, self.part, self.account, marker='marker',
                prefix='prefix', delimiter='delimiter', limit=1000,
                end_marker='endmarker', reverse='on')
            self.assertEqual(conn.method, 'GET')
            self.assertEqual(conn.path, self.account_path)
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(resp_headers, stub_headers)
        self.assertEqual(json.loads(body), resp)
        # all listing query parameters must be forwarded
        self.assertTrue('marker=marker' in conn.query_string)
        self.assertTrue('delimiter=delimiter' in conn.query_string)
        self.assertTrue('limit=1000' in conn.query_string)
        self.assertTrue('prefix=prefix' in conn.query_string)
        self.assertTrue('format=json' in conn.query_string)
        self.assertTrue('end_marker=endmarker' in conn.query_string)
        self.assertTrue('reverse=on' in conn.query_string)

    def test_direct_client_exception(self):
        stub_headers = {'X-Trans-Id': 'txb5f59485c578460f8be9e-0053478d09'}
        body = 'a server error has occurred'
        with mocked_http_conn(500, stub_headers, body):
            try:
                direct_client.direct_get_account(self.node, self.part,
                                                 self.account)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')
        self.assertEqual(err.http_status, 500)
        expected_err_msg_parts = (
            'Account server %s:%s' % (self.node['ip'], self.node['port']),
            'GET %r' % self.account_path,
            'status 500',
        )
        for item in expected_err_msg_parts:
            self.assertTrue(
                item in str(err), '%r was not in "%s"' % (item, err))
        self.assertEqual(err.http_host, self.node['ip'])
        self.assertEqual(err.http_port, self.node['port'])
        self.assertEqual(err.http_device, self.node['device'])
        self.assertEqual(err.http_status, 500)
        self.assertEqual(err.http_reason, 'Internal Error')
        self.assertEqual(err.http_headers, stub_headers)

    def test_direct_get_account_no_content_does_not_parse_body(self):
        headers = {
            'X-Account-Container-Count': '1',
            'X-Account-Object-Count': '1',
            'X-Account-Bytes-Used': '1',
            'X-Timestamp': '1234567890',
            'X-Put-Timestamp': '1234567890'}
        with mocked_http_conn(204, headers) as conn:
            resp_headers, resp = direct_client.direct_get_account(
                self.node, self.part, self.account)
            self.assertEqual(conn.method, 'GET')
            self.assertEqual(conn.path, self.account_path)
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertDictEqual(resp_headers, headers)
        # 204 No Content -> empty listing, body not JSON-decoded
        self.assertEqual([], resp)

    def test_direct_get_account_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_get_account(
                    self.node, self.part, self.account)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'GET')
            self.assertEqual(conn.path, self.account_path)
        self.assertEqual(err.http_status, 500)
        self.assertTrue('GET' in str(err))

    def test_direct_delete_account(self):
        node = {'ip': '1.2.3.4', 'port': '6200', 'device': 'sda'}
        part = '0'
        account = 'a'

        mock_path = 'swift.common.bufferedhttp.http_connect_raw'
        with mock.patch(mock_path) as fake_connect:
            fake_connect.return_value.getresponse.return_value.status = 200
            direct_client.direct_delete_account(node, part, account)
            args, kwargs = fake_connect.call_args
            method = args[2]
            self.assertEqual('DELETE', method)
            path = args[3]
            self.assertEqual('/sda/0/a', path)
            headers = args[4]
            # deletes must carry a timestamp
            self.assertTrue('X-Timestamp' in headers)

    def test_direct_delete_account_failure(self):
        node = {'ip': '1.2.3.4', 'port': '6200', 'device': 'sda'}
        part = '0'
        account = 'a'

        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_delete_account(node, part, account)
            except ClientException as err:
                pass
            # NOTE(review): unlike the sibling tests there is no
            # ``else: self.fail(...)`` here; if no exception is raised,
            # ``err`` below is unbound and the test dies with NameError
            # instead of a clear failure message — worth fixing.
            self.assertEqual('DELETE', conn.method)
            self.assertEqual('/sda/0/a', conn.path)
            self.assertEqual(err.http_status, 500)

    def test_direct_head_container(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(200, headers) as conn:
            resp = direct_client.direct_head_container(
                self.node, self.part, self.account, self.container)
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)
            self.assertEqual(conn.req_headers['user-agent'],
                             self.user_agent)
        self.assertEqual(headers, resp)

    def test_direct_head_container_error(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(503, headers) as conn:
            try:
                direct_client.direct_head_container(
                    self.node, self.part, self.account, self.container)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')
            # check request
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(err.http_status, 503)
        self.assertEqual(err.http_headers, headers)
        self.assertTrue('HEAD' in str(err))

    def test_direct_head_container_deleted(self):
        important_timestamp = Timestamp(time.time()).internal
        headers = HeaderKeyDict({'X-Backend-Important-Timestamp':
                                 important_timestamp})

        with mocked_http_conn(404, headers) as conn:
            try:
                direct_client.direct_head_container(
                    self.node, self.part, self.account, self.container)
            except Exception as err:
                self.assertTrue(isinstance(err, ClientException))
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(err.http_status, 404)
        self.assertEqual(err.http_headers, headers)

    def test_direct_get_container(self):
        headers = HeaderKeyDict({'key': 'value'})
        body = '[{"hash": "8f4e3", "last_modified": "317260", "bytes": 209}]'

        with mocked_http_conn(200, headers, body) as conn:
            resp_headers, resp = direct_client.direct_get_container(
                self.node, self.part, self.account, self.container,
                marker='marker', prefix='prefix', delimiter='delimiter',
                limit=1000, end_marker='endmarker', reverse='on')

        self.assertEqual(conn.req_headers['user-agent'],
                         'direct-client %s' % os.getpid())
        self.assertEqual(headers, resp_headers)
        self.assertEqual(json.loads(body), resp)
        self.assertTrue('marker=marker' in conn.query_string)
        self.assertTrue('delimiter=delimiter' in conn.query_string)
        self.assertTrue('limit=1000' in conn.query_string)
        self.assertTrue('prefix=prefix' in conn.query_string)
        self.assertTrue('format=json' in conn.query_string)
        self.assertTrue('end_marker=endmarker' in conn.query_string)
        self.assertTrue('reverse=on' in conn.query_string)

    def test_direct_get_container_no_content_does_not_decode_body(self):
        headers = {}
        body = ''
        with mocked_http_conn(204, headers, body) as conn:
            resp_headers, resp = direct_client.direct_get_container(
                self.node, self.part, self.account, self.container)

        self.assertEqual(conn.req_headers['user-agent'],
                         'direct-client %s' % os.getpid())
        self.assertEqual(headers, resp_headers)
        self.assertEqual([], resp)

    def test_direct_delete_container(self):
        with mocked_http_conn(200) as conn:
            direct_client.direct_delete_container(
                self.node, self.part, self.account, self.container)
            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.container_path)

    def test_direct_delete_container_with_timestamp(self):
        # ensure timestamp is different from any that might be auto-generated
        timestamp = Timestamp(time.time() - 100)
        headers = {'X-Timestamp': timestamp.internal}
        with mocked_http_conn(200) as conn:
            direct_client.direct_delete_container(
                self.node, self.part, self.account, self.container,
                headers=headers)
            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.container_path)
            self.assertTrue('X-Timestamp' in conn.req_headers)
            self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])

    def test_direct_delete_container_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_delete_container(
                    self.node, self.part, self.account, self.container)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('DELETE' in str(err))

    def test_direct_put_container_object(self):
        headers = {'x-foo': 'bar'}

        with mocked_http_conn(204) as conn:
            rv = direct_client.direct_put_container_object(
                self.node, self.part, self.account, self.container, self.obj,
                headers=headers)
            self.assertEqual(conn.method, 'PUT')
            self.assertEqual(conn.path, self.obj_path)
            # a timestamp is auto-added alongside caller headers
            self.assertTrue('x-timestamp' in conn.req_headers)
            self.assertEqual('bar', conn.req_headers.get('x-foo'))

        self.assertEqual(rv, None)

    def test_direct_put_container_object_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_put_container_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'PUT')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('PUT' in str(err))

    def test_direct_delete_container_object(self):
        with mocked_http_conn(204) as conn:
            rv = direct_client.direct_delete_container_object(
                self.node, self.part, self.account, self.container, self.obj)
            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(rv, None)

    def test_direct_delete_container_obj_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_delete_container_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('DELETE' in str(err))

    def test_direct_head_object(self):
        headers = HeaderKeyDict({'x-foo': 'bar'})

        with mocked_http_conn(200, headers) as conn:
            resp = direct_client.direct_head_object(
                self.node, self.part, self.account, self.container,
                self.obj, headers=headers)
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
            self.assertEqual('bar', conn.req_headers.get('x-foo'))
            # HEAD must not auto-add a timestamp
            self.assertTrue('x-timestamp' not in conn.req_headers,
                            'x-timestamp was in HEAD request headers')
        self.assertEqual(headers, resp)

    def test_direct_head_object_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('HEAD' in str(err))

    def test_direct_head_object_not_found(self):
        important_timestamp = Timestamp(time.time()).internal
        stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
        with mocked_http_conn(404, headers=stub_headers) as conn:
            try:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 404)
        self.assertEqual(err.http_headers['x-backend-important-timestamp'],
                         important_timestamp)

    def test_direct_get_object(self):
        contents = six.StringIO('123456')

        with mocked_http_conn(200, body=contents) as conn:
            resp_header, obj_body = direct_client.direct_get_object(
                self.node, self.part, self.account, self.container, self.obj)
            self.assertEqual(conn.method, 'GET')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(obj_body, contents.getvalue())

    def test_direct_get_object_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_get_object(
                    self.node, self.part,
                    self.account, self.container, self.obj)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'GET')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('GET' in str(err))

    def test_direct_get_object_chunks(self):
        contents = six.StringIO('123456')
        downloaded = b''

        with mocked_http_conn(200, body=contents) as conn:
            resp_header, obj_body = direct_client.direct_get_object(
                self.node, self.part, self.account, self.container, self.obj,
                resp_chunk_size=2)
            while obj_body:
                try:
                    # NOTE(review): ``obj_body.next()`` is Python-2-only
                    # iterator protocol; ``next(obj_body)`` works on both
                    # py2 and py3 — confirm before porting.
                    chunk = obj_body.next()
                except StopIteration:
                    break
                downloaded += chunk

        self.assertEqual('GET', conn.method)
        self.assertEqual(self.obj_path, conn.path)
        self.assertEqual('123456', downloaded)

    def test_direct_post_object(self):
        headers = {'Key': 'value'}

        resp_headers = []

        with mocked_http_conn(200, resp_headers) as conn:
            direct_client.direct_post_object(
                self.node, self.part, self.account, self.container, self.obj,
                headers)
            self.assertEqual(conn.method, 'POST')
            self.assertEqual(conn.path, self.obj_path)

        for header in headers:
            self.assertEqual(conn.req_headers[header], headers[header])

    def test_direct_post_object_error(self):
        headers = {'Key': 'value'}

        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_post_object(
                    self.node, self.part, self.account, self.container,
                    self.obj, headers)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')

            self.assertEqual(conn.method, 'POST')
            self.assertEqual(conn.path, self.obj_path)
            for header in headers:
                self.assertEqual(conn.req_headers[header], headers[header])
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
            self.assertTrue('x-timestamp' in conn.req_headers)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('POST' in str(err))

    def test_direct_delete_object(self):
        with mocked_http_conn(200) as conn:
            resp = direct_client.direct_delete_object(
                self.node, self.part, self.account, self.container, self.obj)
            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.obj_path)
        self.assertEqual(resp, None)

    def test_direct_delete_object_with_timestamp(self):
        # ensure timestamp is different from any that might be auto-generated
        timestamp = Timestamp(time.time() - 100)
        headers = {'X-Timestamp': timestamp.internal}
        with mocked_http_conn(200) as conn:
            direct_client.direct_delete_object(
                self.node, self.part, self.account, self.container, self.obj,
                headers=headers)
            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.obj_path)
            self.assertTrue('X-Timestamp' in conn.req_headers)
            self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])

    def test_direct_delete_object_error(self):
        with mocked_http_conn(503) as conn:
            try:
                direct_client.direct_delete_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'DELETE')
            self.assertEqual(conn.path, self.obj_path)
        self.assertEqual(err.http_status, 503)
        self.assertTrue('DELETE' in str(err))

    def test_direct_get_suffix_hashes(self):
        data = {'a83': 'c130a2c17ed45102aada0f4eee69494ff'}
        body = pickle.dumps(data)
        with mocked_http_conn(200, {}, body) as conn:
            resp = direct_client.direct_get_suffix_hashes(self.node,
                                                          self.part, ['a83'])
            self.assertEqual(conn.method, 'REPLICATE')
            self.assertEqual(conn.path, '/sda/0/a83')
            # REPLICATE goes to the replication ip/port, not the data ones
            self.assertEqual(conn.host, '1.2.3.5')
            self.assertEqual(conn.port, '7000')
        self.assertEqual(data, resp)

    def _test_direct_get_suffix_hashes_fail(self, status_code):
        """Shared helper: REPLICATE failure must raise
        DirectClientException carrying the replication endpoint."""
        with mocked_http_conn(status_code):
            with self.assertRaises(DirectClientException) as cm:
                direct_client.direct_get_suffix_hashes(
                    self.node, self.part, ['a83', 'b52'])
        self.assertIn('REPLICATE', cm.exception.message)
        self.assertIn(quote('/%s/%s/a83-b52'
                            % (self.node['device'], self.part)),
                      cm.exception.message)
        self.assertIn(self.node['replication_ip'], cm.exception.message)
        self.assertIn(self.node['replication_port'], cm.exception.message)
        self.assertEqual(self.node['replication_ip'], cm.exception.http_host)
        self.assertEqual(self.node['replication_port'], cm.exception.http_port)
        self.assertEqual(self.node['device'], cm.exception.http_device)
        self.assertEqual(status_code, cm.exception.http_status)

    def test_direct_get_suffix_hashes_503(self):
        self._test_direct_get_suffix_hashes_fail(503)

    def test_direct_get_suffix_hashes_507(self):
        self._test_direct_get_suffix_hashes_fail(507)

    def test_direct_put_object_with_content_length(self):
        contents = six.StringIO('123456')

        with mocked_http_conn(200) as conn:
            resp = direct_client.direct_put_object(
                self.node, self.part, self.account, self.container, self.obj,
                contents, 6)
            self.assertEqual(conn.method, 'PUT')
            self.assertEqual(conn.path, self.obj_path)
        # NOTE(review): md5 over a native str is Python-2-only; py3
        # requires bytes — confirm before porting.
        self.assertEqual(md5('123456').hexdigest(), resp)

    def test_direct_put_object_fail(self):
        contents = six.StringIO('123456')

        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_put_object(
                    self.node, self.part, self.account, self.container,
                    self.obj, contents)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'PUT')
            self.assertEqual(conn.path, self.obj_path)
        self.assertEqual(err.http_status, 500)

    def test_direct_put_object_chunked(self):
        contents = six.StringIO('123456')

        with mocked_http_conn(200) as conn:
            resp = direct_client.direct_put_object(
                self.node, self.part, self.account, self.container, self.obj,
                contents)
            self.assertEqual(conn.method, 'PUT')
            self.assertEqual(conn.path, self.obj_path)
        # no content-length -> chunked transfer encoding on the wire
        self.assertEqual(md5('6\r\n123456\r\n0\r\n\r\n').hexdigest(), resp)

    def test_direct_put_object_args(self):
        # One test to cover all missing checks
        contents = ""
        with mocked_http_conn(200) as conn:
            resp = direct_client.direct_put_object(
                self.node, self.part, self.account, self.container, self.obj,
                contents, etag="testing-etag", content_type='Text')
            self.assertEqual('PUT', conn.method)
            self.assertEqual(self.obj_path, conn.path)
            self.assertEqual(conn.req_headers['Content-Length'], '0')
            self.assertEqual(conn.req_headers['Content-Type'], 'Text')
        self.assertEqual(md5('0\r\n\r\n').hexdigest(), resp)

    def test_direct_put_object_header_content_length(self):
        contents = six.StringIO('123456')
        stub_headers = HeaderKeyDict({
            'Content-Length': '6'})

        with mocked_http_conn(200) as conn:
            resp = direct_client.direct_put_object(
                self.node, self.part, self.account, self.container, self.obj,
                contents, headers=stub_headers)
            self.assertEqual('PUT', conn.method)
            self.assertEqual(conn.req_headers['Content-length'], '6')
        self.assertEqual(md5('123456').hexdigest(), resp)

    def test_retry(self):
        headers = HeaderKeyDict({'key': 'value'})

        with mocked_http_conn(200, headers) as conn:
            attempts, resp = direct_client.retry(
                direct_client.direct_head_object, self.node, self.part,
                self.account, self.container, self.obj)
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)
            self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
            self.assertEqual(headers, resp)
            self.assertEqual(attempts, 1)

    def test_retry_client_exception(self):
        logger = debug_logger('direct-client-test')

        with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
                mocked_http_conn(500) as conn:
            with self.assertRaises(direct_client.ClientException) as err_ctx:
                direct_client.retry(direct_client.direct_delete_object,
                                    self.node, self.part,
                                    self.account, self.container, self.obj,
                                    retries=2, error_log=logger.error)
        self.assertEqual('DELETE', conn.method)
        self.assertEqual(err_ctx.exception.http_status, 500)
        self.assertIn('DELETE', err_ctx.exception.message)
        self.assertIn(quote('/%s/%s/%s/%s/%s'
                            % (self.node['device'], self.part, self.account,
                               self.container, self.obj)),
                      err_ctx.exception.message)
        self.assertIn(self.node['ip'], err_ctx.exception.message)
        self.assertIn(self.node['port'], err_ctx.exception.message)
        self.assertEqual(self.node['ip'], err_ctx.exception.http_host)
        self.assertEqual(self.node['port'], err_ctx.exception.http_port)
        self.assertEqual(self.node['device'], err_ctx.exception.http_device)
        self.assertEqual(500, err_ctx.exception.http_status)
        # exponential-ish backoff: sleeps of 1 then 2 between 3 attempts
        self.assertEqual([mock.call(1), mock.call(2)],
                         mock_sleep.call_args_list)
        error_lines = logger.get_lines_for_level('error')
        self.assertEqual(3, len(error_lines))
        for line in error_lines:
            self.assertIn('500 Internal Error', line)

    def test_retry_http_exception(self):
        logger = debug_logger('direct-client-test')

        with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
                mocked_http_conn(HTTPException('Kaboom!')) as conn:
            with self.assertRaises(HTTPException) as err_ctx:
                direct_client.retry(direct_client.direct_delete_object,
                                    self.node, self.part,
                                    self.account, self.container, self.obj,
                                    retries=2, error_log=logger.error)
        self.assertEqual('DELETE', conn.method)
        self.assertEqual('Kaboom!', str(err_ctx.exception))
        self.assertEqual([mock.call(1), mock.call(2)],
                         mock_sleep.call_args_list)
        error_lines = logger.get_lines_for_level('error')
        self.assertEqual(3, len(error_lines))
        for line in error_lines:
            self.assertIn('Kaboom!', line)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, glob
from optparse import OptionParser
# Parse the --genpydir option pointing at the thrift-generated Python
# sources, then put both the generated code and the built thrift lib on
# sys.path before the generated modules are imported below.
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:]  # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
# assumes exactly one build/lib.* directory exists — IndexError otherwise
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest.ttypes import *
from DebugProtoTest.ttypes import CompactProtoTestStruct, Empty
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol, TCompactProtocol, TJSONProtocol
from thrift.TSerialization import serialize, deserialize
import unittest
import time
class AbstractTest(unittest.TestCase):
    """Protocol-agnostic round-trip serialization tests.

    Subclasses set ``protocol_factory``; every test serializes a fixture
    built in setUp() and deserializes it again through that protocol,
    asserting the round trip is lossless (and, for the versioning
    fixtures, that unknown fields are skipped in both directions).

    NOTE(review): uses the deprecated ``assertEquals``/``assertNotEquals``
    aliases throughout — consistent with the file's py2-era style, but
    they should become assertEqual/assertNotEqual on a py3 port.
    """

    def setUp(self):
        # V1/V2 structs sharing begin/end fields, for version-skew tests
        self.v1obj = VersioningTestV1(
            begin_in_both=12345,
            old_string='aaa',
            end_in_both=54321,
        )

        self.v2obj = VersioningTestV2(
            begin_in_both=12345,
            newint=1,
            newbyte=2,
            newshort=3,
            newlong=4,
            newdouble=5.0,
            newstruct=Bonk(message="Hello!", type=123),
            newlist=[7,8,9],
            newset=set([42,1,8]),
            newmap={1:2,2:3},
            newstring="Hola!",
            end_in_both=54321,
        )

        self.bools = Bools(im_true=True, im_false=False)
        self.bools_flipped = Bools(im_true=False, im_false=True)

        # widely spaced field ids — meaningful for compact protocol's
        # field-delta encoding
        self.large_deltas = LargeDeltas (
            b1=self.bools,
            b10=self.bools_flipped,
            b100=self.bools,
            check_true=True,
            b1000=self.bools_flipped,
            check_false=False,
            vertwo2000=VersioningTestV2(newstruct=Bonk(message='World!', type=314)),
            a_set2500=set(['lazy', 'brown', 'cow']),
            vertwo3000=VersioningTestV2(newset=set([2, 3, 5, 7, 11])),
            big_numbers=[2**8, 2**16, 2**31-1, -(2**31-1)]
        )

        # one of every scalar/container field type, including boundary
        # values for each integer width
        self.compact_struct = CompactProtoTestStruct(
            a_byte = 127,
            a_i16=32000,
            a_i32=1000000000,
            a_i64=0xffffffffff,
            a_double=5.6789,
            a_string="my string",
            true_field=True,
            false_field=False,
            empty_struct_field=Empty(),
            byte_list=[-127, -1, 0, 1, 127],
            i16_list=[-1, 0, 1, 0x7fff],
            i32_list= [-1, 0, 0xff, 0xffff, 0xffffff, 0x7fffffff],
            i64_list=[-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff],
            double_list=[0.1, 0.2, 0.3],
            string_list=["first", "second", "third"],
            boolean_list=[True, True, True, False, False, False],
            struct_list=[Empty(), Empty()],
            byte_set=set([-127, -1, 0, 1, 127]),
            i16_set=set([-1, 0, 1, 0x7fff]),
            i32_set=set([1, 2, 3]),
            i64_set=set([-1, 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffff, 0xffffffffffff, 0xffffffffffffff, 0x7fffffffffffffff]),
            double_set=set([0.1, 0.2, 0.3]),
            string_set=set(["first", "second", "third"]),
            boolean_set=set([True, False]),
            #struct_set=set([Empty()]), # unhashable instance
            byte_byte_map={1 : 2},
            i16_byte_map={1 : 1, -1 : 1, 0x7fff : 1},
            i32_byte_map={1 : 1, -1 : 1, 0x7fffffff : 1},
            i64_byte_map={0 : 1, 1 : 1, -1 : 1, 0x7fffffffffffffff : 1},
            double_byte_map={-1.1 : 1, 1.1 : 1},
            string_byte_map={"first" : 1, "second" : 2, "third" : 3, "" : 0},
            boolean_byte_map={True : 1, False: 0},
            byte_i16_map={1 : 1, 2 : -1, 3 : 0x7fff},
            byte_i32_map={1 : 1, 2 : -1, 3 : 0x7fffffff},
            byte_i64_map={1 : 1, 2 : -1, 3 : 0x7fffffffffffffff},
            byte_double_map={1 : 0.1, 2 : -0.1, 3 : 1000000.1},
            byte_string_map={1 : "", 2 : "blah", 3 : "loooooooooooooong string"},
            byte_boolean_map={1 : True, 2 : False},
            #list_byte_map # unhashable
            #set_byte_map={set([1, 2, 3]) : 1, set([0, 1]) : 2, set([]) : 0}, # unhashable
            #map_byte_map # unhashable
            byte_map_map={0 : {}, 1 : {1 : 1}, 2 : {1 : 1, 2 : 2}},
            byte_set_map={0 : set([]), 1 : set([1]), 2 : set([1, 2])},
            byte_list_map={0 : [], 1 : [1], 2 : [1, 2]},
            )

        # nested-container fixtures
        self.nested_lists_i32x2 = NestedListsI32x2(
            [
                [ 1, 1, 2 ],
                [ 2, 7, 9 ],
                [ 3, 5, 8 ]
            ]
        )

        self.nested_lists_i32x3 = NestedListsI32x3(
            [
                [
                    [ 2, 7, 9 ],
                    [ 3, 5, 8 ]
                ],
                [
                    [ 1, 1, 2 ],
                    [ 1, 4, 9 ]
                ]
            ]
        )

        self.nested_mixedx2 = NestedMixedx2( int_set_list=[
            set([1,2,3]),
            set([1,4,9]),
            set([1,2,3,5,8,13,21]),
            set([-1, 0, 1])
            ],
            # note, the sets below are sets of chars, since the strings are iterated
            map_int_strset={ 10:set('abc'), 20:set('def'), 30:set('GHI') },
            map_int_strset_list=[
                { 10:set('abc'), 20:set('def'), 30:set('GHI') },
                { 100:set('lmn'), 200:set('opq'), 300:set('RST') },
                { 1000:set('uvw'), 2000:set('wxy'), 3000:set('XYZ') }
                ]
        )

        self.nested_lists_bonk = NestedListsBonk(
            [
                [
                    [
                        Bonk(message='inner A first', type=1),
                        Bonk(message='inner A second', type=1)
                    ],
                    [
                        Bonk(message='inner B first', type=2),
                        Bonk(message='inner B second', type=2)
                    ]
                ]
            ]
        )

        self.list_bonks = ListBonks(
            [
                Bonk(message='inner A', type=1),
                Bonk(message='inner B', type=2),
                Bonk(message='inner C', type=0)
            ]
        )

    def _serialize(self, obj):
        """Serialize *obj* through this class's protocol into bytes."""
        trans = TTransport.TMemoryBuffer()
        prot = self.protocol_factory.getProtocol(trans)
        obj.write(prot)
        return trans.getvalue()

    def _deserialize(self, objtype, data):
        """Deserialize *data* (bytes) into a fresh *objtype* instance."""
        prot = self.protocol_factory.getProtocol(TTransport.TMemoryBuffer(data))
        ret = objtype()
        ret.read(prot)
        return ret

    def testForwards(self):
        # old writer -> new reader: shared fields survive
        obj = self._deserialize(VersioningTestV2, self._serialize(self.v1obj))
        self.assertEquals(obj.begin_in_both, self.v1obj.begin_in_both)
        self.assertEquals(obj.end_in_both, self.v1obj.end_in_both)

    def testBackwards(self):
        # new writer -> old reader: unknown fields are skipped
        obj = self._deserialize(VersioningTestV1, self._serialize(self.v2obj))
        self.assertEquals(obj.begin_in_both, self.v2obj.begin_in_both)
        self.assertEquals(obj.end_in_both, self.v2obj.end_in_both)

    def testSerializeV1(self):
        obj = self._deserialize(VersioningTestV1, self._serialize(self.v1obj))
        self.assertEquals(obj, self.v1obj)

    def testSerializeV2(self):
        obj = self._deserialize(VersioningTestV2, self._serialize(self.v2obj))
        self.assertEquals(obj, self.v2obj)

    def testBools(self):
        self.assertNotEquals(self.bools, self.bools_flipped)
        self.assertNotEquals(self.bools, self.v1obj)
        obj = self._deserialize(Bools, self._serialize(self.bools))
        self.assertEquals(obj, self.bools)
        obj = self._deserialize(Bools, self._serialize(self.bools_flipped))
        self.assertEquals(obj, self.bools_flipped)
        rep = repr(self.bools)
        self.assertTrue(len(rep) > 0)

    def testLargeDeltas(self):
        # test large field deltas (meaningful in CompactProto only)
        obj = self._deserialize(LargeDeltas, self._serialize(self.large_deltas))
        self.assertEquals(obj, self.large_deltas)
        rep = repr(self.large_deltas)
        self.assertTrue(len(rep) > 0)

    def testNestedListsI32x2(self):
        obj = self._deserialize(NestedListsI32x2, self._serialize(self.nested_lists_i32x2))
        self.assertEquals(obj, self.nested_lists_i32x2)
        rep = repr(self.nested_lists_i32x2)
        self.assertTrue(len(rep) > 0)

    def testNestedListsI32x3(self):
        obj = self._deserialize(NestedListsI32x3, self._serialize(self.nested_lists_i32x3))
        self.assertEquals(obj, self.nested_lists_i32x3)
        rep = repr(self.nested_lists_i32x3)
        self.assertTrue(len(rep) > 0)

    def testNestedMixedx2(self):
        obj = self._deserialize(NestedMixedx2, self._serialize(self.nested_mixedx2))
        self.assertEquals(obj, self.nested_mixedx2)
        rep = repr(self.nested_mixedx2)
        self.assertTrue(len(rep) > 0)

    def testNestedListsBonk(self):
        obj = self._deserialize(NestedListsBonk, self._serialize(self.nested_lists_bonk))
        self.assertEquals(obj, self.nested_lists_bonk)
        rep = repr(self.nested_lists_bonk)
        self.assertTrue(len(rep) > 0)

    def testListBonks(self):
        obj = self._deserialize(ListBonks, self._serialize(self.list_bonks))
        self.assertEquals(obj, self.list_bonks)
        rep = repr(self.list_bonks)
        self.assertTrue(len(rep) > 0)

    def testCompactStruct(self):
        # test large field deltas (meaningful in CompactProto only)
        obj = self._deserialize(CompactProtoTestStruct, self._serialize(self.compact_struct))
        self.assertEquals(obj, self.compact_struct)
        rep = repr(self.compact_struct)
        self.assertTrue(len(rep) > 0)
class NormalBinaryTest(AbstractTest):
    # Runs every shared test case against the pure-Python binary protocol.
    protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
    # Runs every shared test case against the C-accelerated binary protocol.
    protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
class CompactProtocolTest(AbstractTest):
    # Runs every shared test case against the compact protocol.
    protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class JSONProtocolTest(AbstractTest):
    # Runs every shared test case against the JSON protocol.
    protocol_factory = TJSONProtocol.TJSONProtocolFactory()
class AcceleratedFramedTest(unittest.TestCase):
    def testSplit(self):
        """Test FramedTransport and BinaryProtocolAccelerated

        Tests that TBinaryProtocolAccelerated and TFramedTransport
        play nicely together when a read spans a frame"""
        protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
        bigstring = "".join(chr(byte) for byte in range(ord("a"), ord("z") + 1))
        databuf = TTransport.TMemoryBuffer()
        prot = protocol_factory.getProtocol(databuf)
        prot.writeI32(42)
        prot.writeString(bigstring)
        prot.writeI16(24)
        data = databuf.getvalue()
        # Bug fix: use floor division — len(data)/2 yields a float slice
        # index under Python 3 and raises TypeError.
        cutpoint = len(data) // 2
        parts = [data[:cutpoint], data[cutpoint:]]
        framed_buffer = TTransport.TMemoryBuffer()
        framed_writer = TTransport.TFramedTransport(framed_buffer)
        for part in parts:
            framed_writer.write(part)
            framed_writer.flush()
        # Two flushes -> two frames -> two 4-byte length prefixes.
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(len(framed_buffer.getvalue()), len(data) + 8)
        # Recreate framed_buffer so we can read from it.
        framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())
        framed_reader = TTransport.TFramedTransport(framed_buffer)
        prot = protocol_factory.getProtocol(framed_reader)
        self.assertEqual(prot.readI32(), 42)
        self.assertEqual(prot.readString(), bigstring)
        self.assertEqual(prot.readI16(), 24)
class SerializersTest(unittest.TestCase):
    """Round-trip tests for the module-level serialize()/deserialize()."""

    def testSerializeThenDeserialize(self):
        obj = Xtruct2(i32_thing=1,
                      struct_thing=Xtruct(string_thing="foo"))
        s1 = serialize(obj)
        # Serialization must be deterministic across repeated calls
        for i in range(10):
            # assertEqual replaces the deprecated assertEquals alias
            self.assertEqual(s1, serialize(obj))
        objcopy = Xtruct2()
        deserialize(objcopy, serialize(obj))
        self.assertEqual(obj, objcopy)
        obj = Xtruct(string_thing="bar")
        objcopy = Xtruct()
        deserialize(objcopy, serialize(obj))
        self.assertEqual(obj, objcopy)
        # test booleans
        obj = Bools(im_true=True, im_false=False)
        objcopy = Bools()
        deserialize(objcopy, serialize(obj))
        self.assertEqual(obj, objcopy)
        # test enums; items() replaces Python-2-only iteritems()
        for num, name in Numberz._VALUES_TO_NAMES.items():
            obj = Bonk(message='enum Numberz value %d is string %s' % (num, name), type=num)
            objcopy = Bonk()
            deserialize(objcopy, serialize(obj))
            self.assertEqual(obj, objcopy)
def suite():
    """Build the suite covering every protocol variant plus transport tests."""
    loader = unittest.TestLoader()
    combined = unittest.TestSuite()
    for test_case in (NormalBinaryTest,
                      AcceleratedBinaryTest,
                      CompactProtocolTest,
                      JSONProtocolTest,
                      AcceleratedFramedTest,
                      SerializersTest):
        combined.addTest(loader.loadTestsFromTestCase(test_case))
    return combined
# Run the full suite with per-test verbose output when invoked directly.
if __name__ == "__main__":
    unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
|
|
# Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains some commonly used utility methods."""
try:
import cookielib as cookie_lib
except ImportError:
import http.cookiejar as cookie_lib
import json
import re
import socket
import threading
import oslo_serialization
from oslo_utils import units
import requests
from requests import exceptions
import six
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers.urihelper import (
singletonURIHelperInstance)
PROD_NAME = 'storageos'  # URN product segment recognized by is_uri()
TIMEOUT_SEC = 20  # 20 seconds
IS_TASK_TIMEOUT = False  # set True by timeout_handler() when a task times out
# NOTE(review): 'global' is a no-op at module scope — AUTH_TOKEN is already a
# module-level name; the statement is kept only for byte-compatibility.
global AUTH_TOKEN
AUTH_TOKEN = None  # X-SDS-AUTH-TOKEN value sent by service_json_request()
TASK_TIMEOUT = 300  # default seconds block_until_complete() waits for a task
def _decode_list(data):
    """Recursively convert text items of a decoded JSON list to UTF-8 bytes.

    :param data: list produced by the JSON decoder
    :returns: new list with text encoded and nested containers converted
    """
    rv = []
    for item in data:
        # Bug fix: the bare name `unicode` does not exist on Python 3;
        # six.text_type (already imported by this module) is `unicode` on
        # Python 2 and `str` on Python 3, preserving the encode-to-bytes
        # behavior on both.
        if isinstance(item, six.text_type):
            item = item.encode('utf-8')
        elif isinstance(item, list):
            item = _decode_list(item)
        elif isinstance(item, dict):
            item = _decode_dict(item)
        rv.append(item)
    return rv
def _decode_dict(data):
    """Recursively convert text keys/values of a decoded JSON dict to UTF-8.

    :param data: dict produced by the JSON decoder
    :returns: new dict with text encoded and nested containers converted
    """
    rv = {}
    for key, value in data.items():
        # Bug fix: the bare name `unicode` does not exist on Python 3;
        # six.text_type keeps the same semantics on both interpreters.
        if isinstance(key, six.text_type):
            key = key.encode('utf-8')
        if isinstance(value, six.text_type):
            value = value.encode('utf-8')
        elif isinstance(value, list):
            value = _decode_list(value)
        elif isinstance(value, dict):
            value = _decode_dict(value)
        rv[key] = value
    return rv
def json_decode(rsp):
    """Used to decode the JSON encoded response."""
    try:
        return json.loads(rsp, object_hook=_decode_dict)
    except ValueError:
        # Surface malformed payloads as a CoprHD value error
        raise CoprHdError(
            CoprHdError.VALUE_ERR,
            (_("Failed to recognize JSON payload:\n[%s]") % rsp))
def service_json_request(ip_addr, port, http_method, uri, body,
                         contenttype='application/json', customheaders=None):
    """Used to make an HTTP request and get the response.

    The message body is encoded in JSON format

    :param ip_addr: IP address or host name of the server
    :param port: port number of the server on which it
                 is listening to HTTP requests
    :param http_method: one of GET, POST, PUT, DELETE
    :param uri: the request URI
    :param body: the request payload
    :param contenttype: value sent as the Content-Type header
    :param customheaders: optional dict merged into the default headers
    :returns: a tuple of two elements: (response body, response headers)
    :raises: CoprHdError in case of HTTP errors with err_code 3
    """
    SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN'
    headers = {'Content-Type': contenttype,
               'ACCEPT': 'application/json, application/octet-stream',
               'X-EMC-REST-CLIENT': 'TRUE'}
    if customheaders:
        headers.update(customheaders)
    try:
        # Port 8080 is the plain-HTTP endpoint; everything else uses TLS.
        protocol = "https://"
        if port == 8080:
            protocol = "http://"
        url = protocol + ip_addr + ":" + six.text_type(port) + uri
        cookiejar = cookie_lib.LWPCookieJar()
        headers[SEC_AUTHTOKEN_HEADER] = AUTH_TOKEN
        if http_method == 'GET':
            response = requests.get(url, headers=headers, verify=False,
                                    cookies=cookiejar)
        elif http_method == 'POST':
            response = requests.post(url, data=body, headers=headers,
                                     verify=False, cookies=cookiejar)
        elif http_method == 'PUT':
            response = requests.put(url, data=body, headers=headers,
                                    verify=False, cookies=cookiejar)
        elif http_method == 'DELETE':
            response = requests.delete(url, headers=headers, verify=False,
                                       cookies=cookiejar)
        else:
            raise CoprHdError(CoprHdError.HTTP_ERR,
                              (_("Unknown/Unsupported HTTP method: %s") %
                               http_method))
        if (response.status_code == requests.codes['ok'] or
                response.status_code == 202):
            return (response.text, response.headers)
        error_msg = None
        if response.status_code == 500:
            responseText = json_decode(response.text)
            errorDetails = ""
            if 'details' in responseText:
                errorDetails = responseText['details']
            # Bug fix: the original used ',' where '%' was intended, so
            # error_msg was built as a (format, args) tuple instead of an
            # interpolated string.
            error_msg = (_("CoprHD internal server error."
                           " Error details: %s") % errorDetails)
        elif response.status_code == 401:
            error_msg = _("Access forbidden: Authentication required")
        elif response.status_code == 403:
            error_msg = ""
            errorDetails = ""
            errorDescription = ""
            responseText = json_decode(response.text)
            if 'details' in responseText:
                errorDetails = responseText['details']
                # Bug fix: '%' instead of ',' (see 500 branch above)
                error_msg = (_("%(error_msg)s Error details:"
                               " %(errorDetails)s") %
                             {'error_msg': error_msg,
                              'errorDetails': errorDetails
                              })
            elif 'description' in responseText:
                errorDescription = responseText['description']
                # Bug fix: '%' instead of ',' (see 500 branch above)
                error_msg = (_("%(error_msg)s Error description:"
                               " %(errorDescription)s") %
                             {'error_msg': error_msg,
                              'errorDescription': errorDescription
                              })
            else:
                error_msg = _("Access forbidden: You don't have"
                              " sufficient privileges to perform this"
                              " operation")
        elif response.status_code == 404:
            error_msg = "Requested resource not found"
        elif response.status_code == 405:
            error_msg = six.text_type(response.text)
        elif response.status_code == 503:
            error_msg = ""
            errorDetails = ""
            errorDescription = ""
            responseText = json_decode(response.text)
            if 'code' in responseText:
                errorCode = responseText['code']
                error_msg = error_msg + "Error " + six.text_type(errorCode)
            if 'details' in responseText:
                errorDetails = responseText['details']
                error_msg = error_msg + ": " + errorDetails
            elif 'description' in responseText:
                errorDescription = responseText['description']
                error_msg = error_msg + ": " + errorDescription
            else:
                error_msg = _("Service temporarily unavailable:"
                              " The server is temporarily unable to"
                              " service your request")
        else:
            error_msg = response.text
            # Bug fix: `unicode` is Python-2-only; six.text_type works on
            # both interpreters with the same semantics.
            if isinstance(error_msg, six.text_type):
                error_msg = error_msg.encode('utf-8')
        raise CoprHdError(CoprHdError.HTTP_ERR,
                          (_("HTTP code: %(status_code)s"
                             ", %(reason)s"
                             " [%(error_msg)s]") % {
                              'status_code': six.text_type(
                                  response.status_code),
                              'reason': six.text_type(
                                  response.reason),
                              'error_msg': six.text_type(
                                  error_msg)
                          }))
    except (CoprHdError, socket.error, exceptions.SSLError,
            exceptions.ConnectionError, exceptions.TooManyRedirects,
            exceptions.Timeout) as e:
        raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e))
    # TODO(Ravi) : Either following exception should have proper message or
    # IOError should just be combined with the above statement
    except IOError as e:
        raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e))
def is_uri(name):
    """Checks whether the name is a URI or not.

    :param name: Name of the resource
    :returns: True if name is URI, False otherwise
    """
    try:
        # A CoprHD URI looks like 'urn:storageos:<trailer>'
        parts = name.split(':', 2)
        return (len(parts) == 3 and parts[0] == 'urn'
                and parts[1] == PROD_NAME)
    except Exception:
        # Any non-string or otherwise malformed input is simply not a URI
        return False
def format_json_object(obj):
    """Formats JSON object to make it readable by proper indentation.

    :param obj: JSON object
    :returns: a string of formatted JSON object
    """
    # sort_keys=True makes the output deterministic; indent=3 matches the
    # formatting used elsewhere in the CoprHD tooling.
    return oslo_serialization.jsonutils.dumps(obj, sort_keys=True, indent=3)
def get_parent_child_from_xpath(name):
    """Returns the parent and child elements from XPath.

    :param name: XPath-style string, e.g. 'parent/child'
    :returns: (parent, leaf) tuple; parent is None when no '/' is present
    """
    if '/' not in name:
        return (None, name)
    parent, leaf = name.rsplit('/', 1)
    return (parent, leaf)
def to_bytes(in_str):
    """Converts a size to bytes.

    :param in_str: a number suffixed with a unit: {number}{unit}
                   units supported:
                   K, KB, k or kb - kilobytes
                   M, MB, m or mb - megabytes
                   G, GB, g or gb - gigabytes
                   T, TB, t or tb - terabytes
    :returns: number of bytes
              None; if input is incorrect
    """
    match = re.search('^([0-9]+)([a-zA-Z]{0,2})$', in_str)
    if not match:
        return None
    count = int(match.group(1))
    suffix = match.group(2).upper()
    if suffix == "":
        # Bare number: already in bytes
        return count
    multipliers = {
        'K': units.Ki, 'KB': units.Ki,
        'M': units.Mi, 'MB': units.Mi,
        'G': units.Gi, 'GB': units.Gi,
        'T': units.Ti, 'TB': units.Ti,
    }
    if suffix not in multipliers:
        return None
    return int(count * int(multipliers[suffix]))
def get_list(json_object, parent_node_name, child_node_name=None):
    """Returns a list of values from child_node_name.

    If child_node is not given, then it will retrieve list from parent node
    """
    if not json_object:
        return []
    parent = json_object[parent_node_name]
    # Normalize: a scalar parent is treated as a single-element collection
    details = parent if isinstance(parent, list) else [parent]
    if child_node_name:
        return [detail[child_node_name] for detail in details]
    return list(details)
def get_node_value(json_object, parent_node_name, child_node_name=None):
    """Returns value of given child_node.

    If child_node is not given, then value of parent node is returned
    returns None: If json_object or parent_node is not given,
                  If child_node is not found under parent_node
    """
    if not json_object or not parent_node_name:
        return None
    detail = json_object[parent_node_name]
    if not child_node_name:
        return detail
    # Missing child keys yield None rather than raising
    return detail[child_node_name] if child_node_name in detail else None
def format_err_msg_and_raise(operation_type, component,
                             error_message, error_code):
    """Method to format error message.

    :param operation_type: create, update, add, etc
    :param component: storagesystem, vpool, etc
    :param error_code: Error code from the API call
    :param error_message: Detailed error message
    :raises CoprHdError: always, with the formatted message
    """
    formated_err_msg = (_("Error: Failed to %(operation_type)s"
                          " %(component)s") %
                        {'operation_type': operation_type,
                         'component': component
                         })
    quoted = (error_message.startswith("\"\'")
              and error_message.endswith("\'\""))
    if quoted:
        # stripping the first 2 and last 2 characters, which are quotes.
        error_message = error_message[2:len(error_message) - 2]
    raise CoprHdError(error_code,
                      formated_err_msg + "\nReason:" + error_message)
def search_by_tag(resource_search_uri, ipaddr, port):
    """Fetches the list of resources with a given tag.

    :param resource_search_uri: The tag based search uri
                                Example: '/block/volumes/search?tag=tagexample1'
    :param ipaddr: IP address of CoprHD host
    :param port: Port number
    :returns: list of resource ids, or None when the response is empty
    :raises: CoprHdError when the URI is not a tag-search URI
    """
    # check if the URI passed has both project and name parameters
    strUri = six.text_type(resource_search_uri)
    # Idiom fix: use the 'in' operator instead of calling __contains__
    if "search" in strUri and "?tag=" in strUri:
        # Get the project URI
        (s, h) = service_json_request(
            ipaddr, port, "GET",
            resource_search_uri, None)
        o = json_decode(s)
        if not o:
            return None
        resources = get_node_value(o, "resource")
        resource_uris = []
        for resource in resources:
            resource_uris.append(resource["id"])
        return resource_uris
    else:
        raise CoprHdError(CoprHdError.VALUE_ERR, (_("Search URI %s"
                                                    " is not in the expected"
                                                    " format, it should end"
                                                    " with ?tag={0}")
                                                  % strUri))
# Timeout handler for synchronous operations
def timeout_handler():
    """Watchdog callback: flag that a synchronous task has timed out.

    Runs on the threading.Timer thread started by block_until_complete();
    the polling loop observes IS_TASK_TIMEOUT and raises.
    """
    global IS_TASK_TIMEOUT
    IS_TASK_TIMEOUT = True
# Blocks the operation until the task is complete/error out/timeout
def block_until_complete(component_type,
                         resource_uri,
                         task_id,
                         ipAddr,
                         port,
                         synctimeout=0):
    """Poll a task until it is ready, fails, or the watchdog timer fires.

    :param component_type: resource category used to resolve the task URI
    :param resource_uri: URI of the resource the task belongs to
    :param task_id: identifier of the task to poll
    :param ipAddr: CoprHD host address
    :param port: CoprHD port
    :param synctimeout: seconds before timing out; 0 selects TASK_TIMEOUT
    :raises: CoprHdError when the task errors out or the timeout fires
    """
    global IS_TASK_TIMEOUT
    IS_TASK_TIMEOUT = False
    # Arm the watchdog; timeout_handler flips IS_TASK_TIMEOUT when it fires.
    if not synctimeout:
        synctimeout = TASK_TIMEOUT
    t = threading.Timer(synctimeout, timeout_handler)
    t.start()
    # NOTE(review): this loop polls the task API back-to-back with no sleep
    # between iterations (a busy-wait) — confirm before adding a delay.
    while True:
        out = get_task_by_resourceuri_and_taskId(
            component_type, resource_uri, task_id, ipAddr, port)
        if out:
            if out["state"] == "ready":
                # cancel the timer and return
                t.cancel()
                break
            # if the status of the task is 'error' then cancel the timer
            # and raise exception
            if out["state"] == "error":
                # cancel the timer
                t.cancel()
                # Bug fix: error_message previously could be referenced
                # before assignment when the errored task carried no
                # service_error details.
                error_message = "Please see logs for more details"
                if ("service_error" in out and
                        "details" in out["service_error"]):
                    error_message = out["service_error"]["details"]
                raise CoprHdError(CoprHdError.VALUE_ERR,
                                  (_("Task: %(task_id)s"
                                     " is failed with"
                                     " error: %(error_message)s") %
                                   {'task_id': task_id,
                                    'error_message': error_message
                                    }))
        if IS_TASK_TIMEOUT:
            IS_TASK_TIMEOUT = False
            raise CoprHdError(CoprHdError.TIME_OUT,
                              (_("Task did not complete in %d secs."
                                 " Operation timed out. Task in CoprHD"
                                 " will continue") % synctimeout))
    return
def get_task_by_resourceuri_and_taskId(component_type, resource_uri,
                                       task_id, ipAddr, port):
    """Returns the single task details."""
    # Resolve the URI template for this component's task endpoint
    task_uri_template = singletonURIHelperInstance.getUri(
        component_type, "task")
    (body, headers) = service_json_request(
        ipAddr, port, "GET",
        task_uri_template.format(resource_uri, task_id), None)
    if not body:
        return None
    return json_decode(body)
class CoprHdError(exception.VolumeBackendAPIException):
    """Custom exception class used to report logical errors.

    Attributes:
        err_code - String error code
        msg - String text
    """
    # Error-code constants used throughout the CoprHD helpers.
    SOS_FAILURE_ERR = 1
    CMD_LINE_ERR = 2
    HTTP_ERR = 3
    VALUE_ERR = 4
    # NOTE(review): NOT_FOUND_ERR shares the value 1 with SOS_FAILURE_ERR, so
    # the two conditions are indistinguishable by code — confirm intended.
    NOT_FOUND_ERR = 1
    ENTRY_ALREADY_EXISTS_ERR = 5
    MAX_COUNT_REACHED = 6
    TIME_OUT = 7
    # NOTE(review): __init__ does not call the parent exception's __init__ —
    # confirm VolumeBackendAPIException tolerates this.
    def __init__(self, err_code, msg):
        self.err_code = err_code
        self.msg = msg
    def __str__(self):
        return repr(self.msg)
class CoprHDResource(object):
    # Base class for CoprHD REST resources; stores the endpoint coordinates.
    def __init__(self, ipaddr, port):
        """Constructor: takes IP address and port of the CoprHD instance.

        These are needed to make http requests for REST API
        """
        self.ipaddr = ipaddr
        self.port = port
|
|
"""Models for stories and story sections"""
import time
from datetime import datetime
import bleach
from django.conf import settings
from django.contrib.auth.models import User
from django.core import urlresolvers
from django.core.cache import cache
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save, pre_save, m2m_changed
from django.template.loader import render_to_string
from django.utils import simplejson
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext_lazy as _
from django_dag.models import edge_factory, node_factory
from taggit.managers import TaggableManager
from uuidfield.fields import UUIDField
from storybase.fields import ShortTextField
from storybase.models import (TzDirtyFieldsMixin, LicensedModel, PermissionMixin,
PublishedModel, TimestampedModel, TranslatedModel, TranslationModel,
WeightedModel)
from storybase.utils import key_from_instance, unique_slugify
from storybase_asset.models import (Asset, DataSet, ASSET_TYPES,
FeaturedAssetsMixin, invalidate_featured_asset_url_cache)
from storybase_help.models import Help
from storybase_user.models import Organization, Project
from storybase_user.utils import format_user_name
from storybase_story import structure
from storybase_story.managers import (ContainerManager, SectionLayoutManager,
SectionManager, StoryManager, StoryTemplateManager)
from storybase_taxonomy.models import TaggedItem
from storybase_badge.models import Badge
class StoryPermission(PermissionMixin):
    """Permissions for the Story model"""
    def anonymoususer_can_view(self, user):
        # Anonymous visitors may only see published stories
        return self.status == 'published'

    def user_can_view(self, user):
        # Imported here to avoid a circular import at module load time
        from storybase_user.utils import is_admin
        if self.status == 'published':
            return True
        if self.author == user or user.is_superuser or is_admin(user):
            return True
        return False

    def user_can_change(self, user):
        from storybase_user.utils import is_admin
        if not user.is_active:
            return False
        if self.author == user or is_admin(user):
            return True
        return False

    def user_can_add(self, user):
        # Any active account may create stories
        return True if user.is_active else False

    def user_can_delete(self, user):
        # Deletion follows the same rules as editing
        return self.user_can_change(user)
class StoryTranslation(TranslationModel):
    """Encapsulates translated fields of a Story"""
    # Each row holds one language's text for one Story
    story = models.ForeignKey('Story')
    title = ShortTextField(blank=True)
    summary = models.TextField(blank=True)
    call_to_action = models.TextField(_("Call to Action"),
        blank=True)
    connected_prompt = models.TextField(_("Connected Story Prompt"),
        blank=True)

    class Meta:
        """Model metadata options"""
        # One translation per (story, language) pair
        unique_together = (('story', 'language'))

    def __unicode__(self):
        return self.title
class Story(WeightedModel, FeaturedAssetsMixin, TzDirtyFieldsMixin,
TranslatedModel, LicensedModel, PublishedModel,
TimestampedModel, StoryPermission):
"""Metadata for a story
The Story model stores a story's metadata and aggregates a story's
media assets
"""
    # Stable public identifier, exposed in URLs and the API
    story_id = UUIDField(auto=True, db_index=True)
    slug = models.SlugField(blank=True)
    byline = models.TextField()
    # Selects which structure class get_structure_obj() instantiates
    structure_type = models.CharField(_("structure"), max_length=20,
        choices=structure.manager.get_structure_options())
    # blank=True, null=True to bypass validation so the user doesn't
    # have to always remember to set this in the Django admin.
    # Though this is set to blank=True, null=True, we should always set
    # this value. In fact, the StoryModelAdmin class sets this to
    # request.user
    author = models.ForeignKey(User, related_name="stories", blank=True,
                               null=True)
    assets = models.ManyToManyField(Asset, related_name='stories',
                                    blank=True)
    datasets = models.ManyToManyField(DataSet, related_name='stories',
                                      blank=True)
    featured_assets = models.ManyToManyField(Asset,
        related_name='featured_in_stories', blank=True,
        help_text=_("Assets to be displayed in teaser version of Story"))
    organizations = models.ManyToManyField(Organization,
                                           related_name='stories',
                                           blank=True)
    projects = models.ManyToManyField(Project, related_name='stories',
                                      blank=True)
    is_template = models.BooleanField(_("Story is a template"),
                                      default=False)
    allow_connected = models.BooleanField(
        _("Story can have connected stories"), default=False)
    template_story = models.ForeignKey('Story',
        related_name='template_for', blank=True, null=True,
        help_text=_("Story whose structure was used to create this story"))
    on_homepage = models.BooleanField(_("Featured on homepage"),
                                      default=False)
    contact_info = models.TextField(_("Contact Information"),
                                    blank=True)
    # Taxonomy / geography relations
    topics = models.ManyToManyField('storybase_taxonomy.Category',
                                    verbose_name=_("Topics"),
                                    related_name='stories',
                                    blank=True)
    locations = models.ManyToManyField('storybase_geo.Location',
                                       verbose_name=_("Locations"),
                                       related_name='stories',
                                       blank=True)
    places = models.ManyToManyField('storybase_geo.Place',
                                    verbose_name=_("Places"),
                                    related_name='stories',
                                    blank=True)
    tags = TaggableManager(through=TaggedItem, blank=True)
    # Asymmetric self-relation through StoryRelation ("connected" stories)
    related_stories = models.ManyToManyField('self',
        related_name='related_to',
        blank=True,
        through='StoryRelation',
        symmetrical=False)
    badges = models.ManyToManyField(Badge, related_name='stories')
    objects = StoryManager()
    # TranslatedModel configuration: these fields live on StoryTranslation
    translated_fields = ['title', 'summary', 'call_to_action',
                         'connected_prompt']
    translation_set = 'storytranslation_set'
    translation_class = StoryTranslation
    # Lazily-built cache for the structure object (see get_structure_obj)
    _structure_obj = None

    class Meta:
        """Model metadata options"""
        verbose_name_plural = "stories"
def __init__(self, *args, **kwargs):
# Set a default license for Story objects
kwargs.setdefault('license', 'CC BY')
super(Story, self).__init__(*args, **kwargs)
    def get_structure_obj(self):
        """Return a structure object for the story"""
        if self._structure_obj is None:
            # A structure object hasn't been instantiated yet.
            # Create one.
            structure_class = structure.manager.get_structure_class(
                self.structure_type)
            # __call__ is equivalent to structure_class(story=self); used
            # because the name `structure` is rebound to a property below.
            self._structure_obj = structure_class.__call__(story=self)
        return self._structure_obj
    # Expose the lazily-built structure object as a read-only attribute
    structure = property(get_structure_obj)
def __unicode__(self):
if self.title:
return self.title
return _("Untitled Story") + " " + self.story_id
@models.permalink
def get_absolute_url(self):
"""Calculate the canonical URL for a Story"""
if self.slug:
return ('story_detail', [self.slug])
return ('story_detail', [self.story_id])
def get_share_url(self):
return urlresolvers.reverse("story_share", kwargs={
'slug': self.slug
})
def get_embed_url(self):
return urlresolvers.reverse("story_embed", kwargs={
'slug': self.slug
})
    @property
    def contributor_name(self):
        """
        Return the contributor's first name and last initial or username
        """
        # Formatting rules live in storybase_user.utils.format_user_name
        return format_user_name(self.author)
def to_simple(self):
"""
Return a simplified version of the story object that can be
passed to json.dumps()
"""
return {
'story_id': self.story_id,
'title': self.title,
'summary': self.summary
}
def to_json(self):
"""Return JSON representation of this object"""
return mark_safe(simplejson.dumps(self.to_simple()))
def get_default_featured_asset(self):
"""
Return the first image asset.
"""
# See if there are any image assets defined on the model
assets = self.assets.filter(type='image').select_subclasses()
if (assets.count()):
return assets[0]
# No image assets either
return None
def get_featured_asset(self):
"""Return the featured asset"""
featured_assets = self.featured_assets.select_subclasses()
try:
# Return the first featured asset. We have the ability of
# selecting multiple featured assets. Perhaps in the future
# allow for specifying a particular feature asset or randomly
# displaying one.
return featured_assets[0]
except IndexError:
# No featured_assets found
return None
def render_story_structure(self, format='html'):
"""Render a representation of the Story structure"""
output = []
try:
root = self.get_root_section()
except Section.DoesNotExist:
return ''
output.append('<ul>')
output.append(root.render(format))
output.append('</ul>')
return mark_safe(u'\n'.join(output))
def get_explore_url(self, filters=None):
"""
Get a URL pointing to the explore view with specific filters set
"""
url = urlresolvers.reverse('explore_stories')
qs_params = []
for filter, values in filters.items():
if values:
qs_params.append("%s=%s" % (filter, ",".join([str(value) for value in values])))
url += "?" + "&".join(qs_params)
return url
def topics_with_links(self):
"""
Return topics with links to the explore view with filters set
to that topic
"""
# This is a little kludgy and it seems to be cleaner to just
# handle this in a ``get_absolute_url`` method of the
# ``Category`` model. However, I wanted to keep the knowledge of
# the explore view decoupled from the Categories model in case we
# want to use Categories for categorizing things other than stories.
topics = [{'name': topic.name, 'url': self.get_explore_url({'topics': [topic.pk]})} for topic in self.topics.all()]
return topics
def related_key(self, field, language=""):
"""Get a cache key for a ManyToMany field for a particular language"""
extra = field + ':' + language if language else field
return key_from_instance(self, extra)
def get_related_list(self, field, id_field, name_field):
"""
Get a list of id, name hashes for a ManyToMany field of this story
Uses the cache if possible
Arguments:
field -- the name of the ManyToMany field of the model instance
id_field -- the name of the field on the related model that holds the
id value
name_field -- the name of the field on the related model that holds the
name value
This is mostly used to dehydrate ManyToMany fields in ``StoryResource``,
but is defined here to try to keep all knowledge fo the caching strategy
"""
language = get_language()
key = self.related_key(field, language)
obj_list = cache.get(key, None)
if obj_list is not None:
return obj_list
manager = getattr(self, field)
obj_list = [{ 'id': getattr(obj, id_field), 'name': getattr(obj, name_field) }
for obj in manager.all()]
cache.set(key, obj_list)
return obj_list
    def topics_list(self):
        """Get a list of id, name pairs for the Story's topics"""
        # Thin wrappers over get_related_list(); id_field varies per model
        return self.get_related_list('topics', 'pk', 'name')

    def organizations_list(self):
        """Get a list of id, name pairs for the Story's organizations"""
        return self.get_related_list('organizations', 'organization_id', 'name')

    def projects_list(self):
        """Get a list of id, name pairs for the Story's projects"""
        return self.get_related_list('projects', 'project_id', 'name')

    def places_list(self):
        """Get a list of id, name pairs for the Story's places"""
        return self.get_related_list('places', 'place_id', 'name')
@property
def inherited_places(self):
"""Get places related to this story, including parents"""
inherited_places = set()
for place in self.places.all():
inherited_places.add(place)
inherited_places.update(place.ancestors_set())
return inherited_places
    @property
    def points(self):
        """
        Get points related to the story, as (latitude, longitude) pairs

        If the story has locations related with the story, use those,
        otherwise try to find centroids of related places.
        """
        # No language component in this cache key, unlike get_related_list
        key = self.related_key('points')
        points = cache.get(key, None)
        if points is not None:
            return points
        points = []
        if self.locations.count():
            # Explicit locations win; pairs are (lat, lng)
            points = [(loc.lat, loc.lng) for loc in self.locations.all()]
        elif self.places.count():
            # We need to track the geolevel of the first place we've found
            # with a boundary so we can try to add points for all other
            # places at that geolevel
            point_geolevel = None
            # Loop through related places looking at smaller geographies
            # first
            for place in self.places.all().order_by('-geolevel__level'):
                if place.boundary:
                    # Place has a geometry associated with it
                    centroid = place.boundary.centroid
                    if not point_geolevel:
                        # centroid.y is latitude, centroid.x is longitude
                        points.append((centroid.y, centroid.x))
                        point_geolevel = place.geolevel
                    else:
                        if place.geolevel == point_geolevel:
                            points.append((centroid.y, centroid.x))
                        else:
                            # We've exhausted all the points at the
                            # lowest geolevel. Quit.
                            break
        # TODO: Decide if we should check non-explicit places
        cache.set(key, points)
        return points
    def natural_key(self):
        # The UUID is the story's natural key for serialization lookups
        return (self.story_id,)
def connected_stories(self, published_only=True, draft_author=None):
"""Get a queryset of connected stories"""
qs = self.related_stories.connected()
if published_only:
# By default only return published connected stories
qs = qs.published()
elif draft_author:
# Alternately, include draft stories by a particular
# author
qs = qs.filter(Q(status='published') | Q(status='draft', author=draft_author))
return qs
    def connected_to_stories(self):
        """Get a queryset of stories that this story is connected to"""
        # 'seed' stories are the originals this connected story responds to
        return self.related_to.seed()
def connected_to(self):
"""
Returns the Story model instance for the seed story of a connected story
"""
connected_to = self.connected_to_stories()
if connected_to.count():
return connected_to[0]
else:
return None
def connected_to_url(self):
"""
Returns the URL for the seed story of a connected story
"""
connected_to = self.connected_to()
if connected_to:
return connected_to.get_absolute_url()
else:
return None
    def is_connected(self):
        """
        Is this story a connected story?
        """
        # True when at least one seed story exists
        return self.connected_to_stories().count() > 0
def connected_count(self):
"""
Helper for the API to get a count of connected stories.
"""
if not self.allow_connected:
# Connected stories aren't enabled for this story. Return early
# and save a call to the DB
return 0
return self.related_stories.connected().published().count()
def builder_url(self):
if self.is_connected():
return urlresolvers.reverse('connected_story_builder',
kwargs={'source_slug':self.connected_to().slug,
'story_id': self.story_id})
else:
return urlresolvers.reverse('story_builder',
kwargs={'story_id': self.story_id})
def viewer_url(self):
if self.is_connected():
slug = self.connected_to().slug
else:
slug = self.slug
url = urlresolvers.reverse('story_viewer',
kwargs={'slug': slug})
if self.is_connected():
url = "%s#connected-stories/%s" % (url, self.story_id)
return url
def get_prompt(self):
connected_to = self.connected_to_stories()
if (not connected_to):
return ""
return connected_to[0].connected_prompt
    @classmethod
    def get_default_img_url_choices(cls):
        # Candidate default images come from project settings
        return settings.STORYBASE_DEFAULT_STORY_IMAGES
def used_assets(self, asset_type=None):
"""Return a queryset of assets actually used in story sections"""
assets = self.assets.exclude(sectionasset=None)
if asset_type:
assets = assets.filter(type=asset_type)
return assets.select_subclasses()
def asset_strings(self):
"""Return all the text from a Story's assets as a single string
This is meant to be used to help generate the document used to
index the story for full-text search using Haystack.
"""
strings = []
# For now, only worry about text assets
for asset in self.used_assets(asset_type='text'):
s = asset.strings()
if s:
strings.append(s)
return " ".join(strings)
    def search_result_metadata(self):
        """Helper method for providing search result metadata to template

        Returns a list of dicts, each with an ``id``, a display ``name``
        and a list of ``values`` ({'name', 'url'} pairs) linking to the
        explore/search views filtered on that value.
        """
        metadata = []
        languages = self.get_language_names()
        topics = self.topics_list()
        topics_tags = []
        explore_url = urlresolvers.reverse('explore_stories')
        search_url = urlresolvers.reverse('haystack_search')
        if languages:
            # Each language links to the explore view filtered by language id.
            metadata.append({
                'id': 'languages',
                'name': _("Languages"),
                'values': [
                    {
                        'name': lang['name'],
                        'url': "%s?languages=%s" % (explore_url, lang['id']),
                    }
                    for lang in languages
                ],
            })
        if topics:
            # Topics link to the explore view filtered by topic id.
            topics_tags.extend([
                {
                    'name': topic['name'],
                    'url': "%s?topics=%s" % (explore_url, topic['id']),
                }
                for topic in topics
            ])
        if self.tags.count():
            # Free-form tags link to a full-text search on the tag name.
            topics_tags.extend([
                {
                    'name': tag.name,
                    'url': "%s?q=%s" % (search_url, tag.name),
                }
                for tag in self.tags.all()
            ])
        # Topics and tags are merged into a single "Tags" entry.
        metadata.append({
            'id': 'tags',
            'name': _("Tags"),
            'values': topics_tags,
        })
        return metadata
def normalize_for_view(self, img_width):
"""Return attributes as a dictionary for use in a view context
This allows using the same template across different models with
differently-named attributes that hold similar information.
"""
context = {
"type": _("Story"),
"title": self.title,
"author": self.contributor_name,
"date": self.created,
"image_html": self.render_featured_asset(width=img_width),
"excerpt": self.summary,
"url": self.get_absolute_url(),
"more_link_text": _("View All Stories"),
"more_link_url": urlresolvers.reverse("explore_stories"),
"viewer_url": self.viewer_url()
}
if not self.allow_connected:
return context
context['connected_count'] = self.connected_stories().count()
return context
def get_weight(self):
if self.published:
return time.mktime(self.published.timetuple())
else:
return 0
    def asset_datasets(self):
        """
        Return datasets used in the story
        Story.datasets contains *all* datasets associated
        with the story, but could include datasets associated
        with assets that are no longer displayed in sections

        The queryset is memoized on the instance, so repeated calls
        within a request issue a single query.
        """
        if not hasattr(self, '_asset_datasets'):
            self._asset_datasets = self.datasets.filter(assets__sectionasset__section__story=self).select_subclasses()
        return self._asset_datasets
def has_all_assets(self):
for section in self.sections.all():
if section.has_all_assets() is False:
return False
return True
def set_story_slug(sender, instance, **kwargs):
    """
    When a StoryTranslation is saved, set its Story's slug if it doesn't have
    one
    Should be connected to StoryTranslation's post_save signal.
    """
    try:
        if not instance.story.slug:
            # Derive a unique slug from the translation's title and
            # persist it on the story.
            unique_slugify(instance.story, instance.title)
            instance.story.save()
    except Story.DoesNotExist:
        # Instance doesn't have a related story.
        # Encountered this when loading fixture
        pass
def set_story_slug_on_publish(sender, instance, **kwargs):
    """Update a story's slug when it is published"""
    # Re-slug only stories that already exist in the database, are being
    # moved to the published state, and have never been published before.
    first_publish = (instance.pk and instance.status == 'published'
                     and instance.published is None)
    if first_publish:
        unique_slugify(instance, instance.title)
def set_date_and_weight_on_published(sender, instance, **kwargs):
    """Set the published date of a story on status change"""
    try:
        previous_status = sender.objects.get(pk=instance.pk).status
    except sender.DoesNotExist:
        # The object has never been saved, so there is no previous status
        # to compare against.
        previous_status = None
    if instance.status == 'published' and previous_status != 'published':
        # The story is transitioning into the published state: stamp the
        # publication date and recompute the sort weight from it.
        instance.published = datetime.now()
        instance.weight = instance.get_weight()
def update_last_edited(sender, instance, **kwargs):
    """
    When an object is added to a Story, update the related object's
    last edited field.
    """
    # Only forward add/remove events with a concrete pk set are relevant.
    action = kwargs.get('action')
    pk_set = kwargs.get('pk_set')
    if action not in ("post_add", "post_remove") or not pk_set:
        return
    if kwargs.get('reverse'):
        return
    model = kwargs.get('model')
    for related in model.objects.filter(pk__in=pk_set):
        related.last_edited = datetime.now()
        related.save()
def add_assets(sender, **kwargs):
    """
    Add asset to a Story's asset list
    This is meant as a signal handler to automatically add assets
    to a Story's assets list when they're added to the
    featured_assets relation
    """
    if kwargs.get('action') != "post_add" or kwargs.get('reverse'):
        return
    story = kwargs.get('instance')
    model = kwargs.get('model')
    pk_set = kwargs.get('pk_set')
    for asset in model.objects.filter(pk__in=pk_set):
        story.assets.add(asset)
        story.save()
def set_default_featured_asset(sender, instance, **kwargs):
    """
    If a story is published and no featured asset has been specified,
    set a default one.
    """
    needs_default = (instance.pk and instance.status == 'published'
                     and instance.featured_assets.count() == 0)
    if not needs_default:
        return
    default_asset = instance.get_default_featured_asset()
    if default_asset is not None:
        instance.featured_assets.add(default_asset)
def set_asset_license(sender, instance, **kwargs):
    """Propagate a Story's license change to its unlicensed assets.

    Should be connected to Story's pre_save signal.
    """
    changed_fields = instance.get_dirty_fields().keys()
    if instance.pk and 'license' in changed_fields:
        # Update all assets' licenses to that of the Story's if a
        # license hasn't already been set.
        instance.assets.filter(license='').update(license=instance.license)
def invalidate_related_cache(sender, instance, field_name, language_key=True,
                             **kwargs):
    """
    Helper function for invalidating cached version of a Story's ManyToMany
    field.
    """
    if kwargs.get('action') not in ("post_add", "post_remove", "post_clear"):
        return
    if kwargs.get('reverse'):
        return
    if not language_key:
        # A single, language-independent cache entry.
        cache.delete(instance.related_key(field_name))
        return
    # One cache entry exists per configured language; drop them all.
    languages = getattr(settings, 'LANGUAGES', None)
    if languages:
        cache.delete_many([instance.related_key(field_name, code)
                           for (code, name) in settings.LANGUAGES])
# Thin m2m_changed adapters binding invalidate_related_cache() to
# specific Story ManyToMany fields (connected below).
def invalidate_places_cache(sender, instance, **kwargs):
    """Invalidate the cached version of a Story's ``places`` field"""
    invalidate_related_cache(sender, instance, 'places', **kwargs)
def invalidate_points_cache(sender, instance, **kwargs):
    """Invalidate the cached version of a Story's ``locations`` field"""
    # Point caches are not per-language, hence language_key=False.
    invalidate_related_cache(sender, instance, 'points', language_key=False,
                             **kwargs)
def invalidate_topics_cache(sender, instance, **kwargs):
    """Invalidate the cached version of a Story's ``topics`` field"""
    invalidate_related_cache(sender, instance, 'topics', **kwargs)
def invalidate_organizations_cache(sender, instance, **kwargs):
    """Invalidate the cached version of Story's ``organizations`` field"""
    invalidate_related_cache(sender, instance, 'organizations', **kwargs)
def invalidate_projects_cache(sender, instance, **kwargs):
    """Invalidate the cached version of Story's ``projects`` field"""
    invalidate_related_cache(sender, instance, 'projects', **kwargs)
def update_weight_for_connected(sender, instance, **kwargs):
    """
    Update the weight field on the seed story when a
    connected story is published.

    Should be connected to Story's pre_save signal.
    """
    if instance.status == 'published':
        dirty_fields = instance.get_dirty_fields()
        if 'status' in dirty_fields:
            # Story is being published
            connected_to = instance.connected_to()
            if connected_to is not None:
                # This is a connected story: copy its weight (publication
                # timestamp) onto the seed story and save so the seed
                # story sorts as recently active.
                connected_to.weight = instance.get_weight()
                connected_to.save()
def clean_storytranslation_html(sender, instance, **kwargs):
    """Strip disallowed HTML from a StoryTranslation's rich-text fields."""
    # Sanitize every user-supplied rich-text field with the same whitelist.
    for field in ('summary', 'call_to_action', 'connected_prompt'):
        cleaned = bleach.clean(getattr(instance, field),
                               settings.STORYBASE_ALLOWED_TAGS)
        setattr(instance, field, cleaned)
# Hook up some signal handlers
# Story lifecycle handlers (run before every Story save).
pre_save.connect(set_story_slug_on_publish, sender=Story)
pre_save.connect(set_date_and_weight_on_published, sender=Story)
pre_save.connect(set_default_featured_asset, sender=Story)
pre_save.connect(set_asset_license, sender=Story)
pre_save.connect(update_weight_for_connected, sender=Story)
# Translation handlers.
pre_save.connect(clean_storytranslation_html, sender=StoryTranslation)
post_save.connect(set_story_slug, sender=StoryTranslation)
# Many-to-many bookkeeping and cache invalidation.
m2m_changed.connect(update_last_edited, sender=Story.organizations.through)
m2m_changed.connect(update_last_edited, sender=Story.projects.through)
m2m_changed.connect(add_assets, sender=Story.featured_assets.through)
m2m_changed.connect(invalidate_places_cache, sender=Story.places.through)
m2m_changed.connect(invalidate_points_cache, sender=Story.locations.through)
m2m_changed.connect(invalidate_topics_cache, sender=Story.topics.through)
m2m_changed.connect(invalidate_projects_cache, sender=Story.projects.through)
m2m_changed.connect(invalidate_organizations_cache, sender=Story.organizations.through)
# NOTE(review): invalidate_featured_asset_url_cache is not defined in this
# chunk -- presumably declared earlier in the module; verify it exists.
m2m_changed.connect(invalidate_featured_asset_url_cache, sender=Story.featured_assets.through)
class StoryRelationPermission(PermissionMixin):
    """Permissions for Story Relations"""
    def user_can_change(self, user):
        from storybase_user.utils import is_admin
        # Inactive accounts can never modify relations.
        if not user.is_active:
            return False
        # TODO: Add additional logic as different relation types
        # are defined
        # Owners of the target story may manage 'connected' relations;
        # site admins may manage anything.
        owns_connected = (self.relation_type == 'connected'
                          and self.target.author == user)
        return bool(owns_connected or is_admin(user))
    def user_can_add(self, user):
        # Adding follows the same policy as changing.
        return self.user_can_change(user)
    def user_can_delete(self, user):
        # Deleting follows the same policy as changing.
        return self.user_can_change(user)
class StoryRelation(StoryRelationPermission, models.Model):
    """Relationship between two stories"""
    # Supported relation types; currently only connected ("seed") stories.
    RELATION_TYPES = (
        ('connected', u"Connected Story"),
    )
    DEFAULT_TYPE = 'connected'
    # Stable public identifier for the relation.
    relation_id = UUIDField(auto=True)
    relation_type = models.CharField(max_length=25, choices=RELATION_TYPES,
        default=DEFAULT_TYPE)
    # NOTE(review): the reverse accessor names look crossed -- the ``source``
    # FK exposes ``Story.target`` and vice versa. This reads as "relations
    # where this story is the target/source", but verify against callers
    # such as ``related_stories`` before renaming.
    source = models.ForeignKey(Story, related_name="target")
    target = models.ForeignKey(Story, related_name="source")
class SectionPermission(PermissionMixin):
    """Permissions for the Section model"""
    def user_can_change(self, user):
        from storybase_user.utils import is_admin
        # Inactive accounts can never edit sections.
        if not user.is_active:
            return False
        # The owning story's author and site admins may edit.
        return bool(self.story.author == user or is_admin(user))
    def user_can_add(self, user):
        # Adding follows the same policy as changing.
        return self.user_can_change(user)
    def user_can_delete(self, user):
        # Deleting follows the same policy as changing.
        return self.user_can_change(user)
class SectionTranslation(TranslationModel):
    """Translated fields of a Section"""
    section = models.ForeignKey('Section')
    title = ShortTextField()
    class Meta:
        """Model metadata options"""
        # One translation per (section, language) pair.
        unique_together = (('section', 'language'))
    def __unicode__(self):
        return self.title
class Section(node_factory('SectionRelation'), TranslatedModel,
        SectionPermission):
    """ Section of a story

    Sections form a DAG (via django_dag's node_factory) so stories can be
    linear or drill-down/"spider" structured.  Titles are translatable.
    """
    # Stable public identifier for the section.
    section_id = UUIDField(auto=True, db_index=True)
    story = models.ForeignKey('Story', related_name='sections')
    # True if this section the root section of the story, either
    # the first section in a linear story, or the central node
    # in a drill-down/"spider" structure. Otherwise, False
    root = models.BooleanField(default=False)
    weight = models.IntegerField(default=0, help_text=_("The ordering of top-level sections relative to each other. Sections with lower weight values are shown before ones with higher weight values in lists."))
    layout = models.ForeignKey('SectionLayout', null=True)
    # NOTE: the field name shadows the ``help`` builtin at class scope.
    help = models.ForeignKey(Help, null=True)
    template_section = models.ForeignKey('Section', blank=True, null=True,
        related_name='template_for',
        help_text=_("A section that provides default values for layout, asset types and help for this section."))
    assets = models.ManyToManyField(Asset, related_name='sections',
        blank=True, through='SectionAsset')
    objects = SectionManager()
    # Class attributes to handle translation.
    translated_fields = ['title']
    translation_set = 'sectiontranslation_set'
    translation_class = SectionTranslation
    def __unicode__(self):
        try:
            return self.title
        except IndexError:
            # HACK: Need this to support deleting Sections through
            # an inline on the Story Change page
            #
            # When deleting an object in the Django admin, a Section
            # object gets instantiated with no translation set.
            # So, the call to __getattr__() raises an IndexError
            return super(Section, self).__unicode__()
    @property
    def sections_in_same_story(self):
        """Return other sections in the same story"""
        return self.__class__.objects.filter(story=self.story)
    def is_island(self):
        """
        Check if has no ancestors nor children
        This is a patch for a broken implementation in django_dag.
        """
        # TODO: Remove this when django_dag is fixed.
        # See https://github.com/elpaso/django-dag/pull/4
        return bool(not self.children.count() and not self.ancestors_set())
    def child_relations(self):
        """
        Get a query set of through model instances for children
        of this section
        """
        # Ordered by explicit weight, then by translated title.
        return self.children.through.objects.filter(parent=self).order_by(
            'weight', 'child__sectiontranslation__title')
    def get_next_section(self):
        """Get the next section"""
        # Delegates to the story's structure strategy object.
        return self.story.structure.get_next_section(self)
    def get_previous_section(self):
        """Get the previous section"""
        return self.story.structure.get_previous_section(self)
    def render(self, format='html', show_title=True):
        """Render a representation of the section structure

        Dispatches to a ``render_<format>`` method; falls back to the
        unicode representation when no such renderer exists.
        """
        try:
            return getattr(self, "render_" + format).__call__(show_title)
        except AttributeError:
            return self.__unicode__()
    def to_simple(self):
        """
        Return a simplified representation of this object for serialization
        """
        simple = {
            'section_id': self.section_id,
            'title': self.title,
            'children': []
        }
        next_section = self.get_next_section()
        previous_section = self.get_previous_section()
        if next_section:
            simple.update({'next_section_id': next_section.section_id})
        if previous_section:
            simple.update(
                {'previous_section_id': previous_section.section_id})
        for child_relation in self.child_relations():
            simple['children'].append(child_relation.child.section_id)
        return simple
    def render_html(self, show_title=True):
        """Render a HTML representation of the section structure"""
        default_template = "storybase_story/sectionlayouts/weighted.html"
        assets = self.sectionasset_set.order_by('weight')
        output = []
        context = {
            'assets': assets,
            'section': self
        }
        if show_title:
            output.append("<h2 class='title'>%s</h2>" % self.title)
        # If there isn't a layout specified, default to the one that just
        # orders the section's assets by their weight.
        template_filename = (self.layout.get_template_filename()
                             if self.layout is not None else default_template)
        output.append(render_to_string(template_filename, context))
        return mark_safe(u'\n'.join(output))
    def change_link(self):
        """Generate a link to the Django admin change page
        You can specify this in the Model Admin's readonly_fields or
        list_display options
        """
        if self.pk:
            change_url = urlresolvers.reverse(
                'admin:storybase_story_section_change', args=(self.pk,))
            return "<a href='%s'>Change Section</a>" % change_url
        else:
            return ''
    change_link.short_description = 'Change'
    change_link.allow_tags = True
    def natural_key(self):
        return (self.section_id,)
    natural_key.dependencies = ['storybase_help.help', 'storybase_story.story']
    def has_all_assets(self):
        """Returns true if every container in this section has an asset"""
        if self.layout is None:
            # There's no layout, so no sensible requirement for a certain
            # number of assets
            return True
        for container in self.layout.containers.all():
            try:
                SectionAsset.objects.get(section=self, container=container)
            except SectionAsset.DoesNotExist:
                return False
        return True
class SectionRelation(edge_factory(Section, concrete=False)):
    """Through class for parent/child relationships between sections"""
    # Ordering of siblings relative to each other.
    weight = models.IntegerField(default=0)
    def __unicode__(self):
        return u"%s is child of %s" % (self.child, self.parent)
class SectionAssetPermission(PermissionMixin):
    """Permissions for the SectionAsset model"""
    def user_can_change(self, user):
        from storybase_user.utils import is_admin
        # Inactive accounts can never edit section assets.
        if not user.is_active:
            return False
        # The owning story's author and site admins may edit.
        return bool(self.section.story.author == user or is_admin(user))
    def user_can_add(self, user):
        # Adding follows the same policy as changing.
        return self.user_can_change(user)
    def user_can_delete(self, user):
        # Deleting follows the same policy as changing.
        return self.user_can_change(user)
class SectionAsset(models.Model, SectionAssetPermission):
    """Through class for Asset to Section relations"""
    section = models.ForeignKey('Section')
    asset = models.ForeignKey('storybase_asset.Asset')
    # The layout container the asset is placed in.
    container = models.ForeignKey('Container', null=True)
    # This won't really get used moving forward, but needs to stay to
    # support backward compatibility for the initial set of stories on
    # staging during the development process.
    weight = models.IntegerField(default=0)
    class Meta:
        # At most one asset per (section, container, weight) slot.
        unique_together = (("section", "container", "weight"),)
def add_section_asset_to_story(sender, instance, **kwargs):
    """When an asset is added to a Section, also add it to the Story
    Should be connected to SectionAsset's post_save signal.
    """
    try:
        if instance.asset not in instance.section.story.assets.all():
            # An asset was added to a section but it is not related to
            # the section's story.
            # Add it to the Story's list of assets.
            instance.section.story.assets.add(instance.asset)
            instance.section.story.save()
    except Asset.DoesNotExist:
        # Instance doesn't have a related asset.
        # Encountered when loading fixture
        pass
# Add assets to stories when they're added to sections
post_save.connect(add_section_asset_to_story, sender=SectionAsset)
def update_story_last_edited(sender, instance, **kwargs):
    """Update the a section's story's last edited field
    Should be connected to Section's post_save signal.
    """
    # Last edited is automatically set on save
    instance.story.save()
# Update a section's story's last edited field when the section is saved
post_save.connect(update_story_last_edited, sender=Section)
class StoryTemplateTranslation(TranslationModel):
    """Translatable fields for the StoryTemplate model"""
    story_template = models.ForeignKey('StoryTemplate')
    title = ShortTextField()
    tag_line = ShortTextField(blank=True)
    description = models.TextField(blank=True)
    # Marketing/guidance copy shown when picking a template.
    ingredients = ShortTextField(blank=True)
    best_for = ShortTextField(blank=True)
    tip = ShortTextField(blank=True)
    def __unicode__(self):
        return self.title
class StoryTemplate(TranslatedModel):
    """Metadata for a template used to create new stories"""
    TIME_NEEDED_CHOICES = (
        ('5 minutes', _('5 minutes')),
        ('30 minutes', _('30 minutes')),
    )
    LEVEL_CHOICES = (
        ('beginner', _("Beginner")),
    )
    # Stable public identifier for the template.
    template_id = UUIDField(auto=True, db_index=True)
    story = models.ForeignKey('Story', blank=True, null=True,
        help_text=_("The story that provides the structure for this "
                    "template"))
    time_needed = models.CharField(max_length=140,
        choices=TIME_NEEDED_CHOICES, blank=True,
        help_text=_("The amount of time needed to create a story of this "
                    "type"))
    level = models.CharField(max_length=140,
        choices=LEVEL_CHOICES, blank=True,
        help_text=_("The level of storytelling experience suggested to "
                    "create stories with this template"))
    slug = models.SlugField(unique=True,
        help_text=_("A human-readable unique identifier"))
    examples = models.ManyToManyField('Story', blank=True, null=True,
        help_text=_("Stories that are examples of this template"),
        related_name="example_for")
    objects = StoryTemplateManager()
    # Class attributes to handle translation
    translated_fields = ['title', 'description', 'tag_line', 'ingredients',
                         'best_for', 'tip']
    translation_set = 'storytemplatetranslation_set'
    translation_class = StoryTemplateTranslation
    def __unicode__(self):
        return self.title
    def natural_key(self):
        return (self.template_id,)
class SectionLayoutTranslation(TranslationModel):
    """Translatable fields for the SectionLayout model"""
    layout = models.ForeignKey('SectionLayout')
    name = ShortTextField()
    def __unicode__(self):
        return self.name
class SectionLayout(TranslatedModel):
    """A named arrangement of containers used to lay out a section."""
    # Valid template choices come from the deployment's settings.
    TEMPLATE_CHOICES = [(name, name) for name
                        in settings.STORYBASE_LAYOUT_TEMPLATES]
    layout_id = UUIDField(auto=True, db_index=True)
    template = models.CharField(_("template"), max_length=100, choices=TEMPLATE_CHOICES)
    containers = models.ManyToManyField('Container', related_name='layouts',
        blank=True)
    slug = models.SlugField(unique=True)
    objects = SectionLayoutManager()
    # Class attributes to handle translation
    translated_fields = ['name']
    translation_set = 'sectionlayouttranslation_set'
    translation_class = SectionLayoutTranslation
    def __unicode__(self):
        return self.name
    def get_template_filename(self):
        """Return the template path relative to the template loaders."""
        return "storybase_story/sectionlayouts/%s" % (self.template)
    def get_template_contents(self):
        # Renders the layout template with an empty context to get its
        # raw markup.
        template_filename = self.get_template_filename()
        return render_to_string(template_filename)
    def natural_key(self):
        return (self.layout_id,)
class Container(models.Model):
    """
    A space to put assets within a ``TemplateLayout``
    """
    # Unique slug-style name; also the natural key.
    name = models.SlugField(unique=True)
    objects = ContainerManager()
    def __unicode__(self):
        return self.name
    def natural_key(self):
        return (self.name,)
class ContainerTemplate(models.Model):
    """Per-asset configuration for template assets in builder"""
    container_template_id = UUIDField(auto=True, db_index=True)
    template = models.ForeignKey('StoryTemplate')
    section = models.ForeignKey('Section')
    container = models.ForeignKey('Container')
    asset_type = models.CharField(max_length=10, choices=ASSET_TYPES,
        blank=True,
        help_text=_("Default asset type"))
    can_change_asset_type = models.BooleanField(default=False,
        help_text=_("User can change the asset type from the default"))
    # Optional help text shown for this container in the builder.
    help = models.ForeignKey(Help, blank=True, null=True)
    def __unicode__(self):
        return "%s / %s / %s" % (self.template.title, self.section.title, self.container.name)
# Internal API functions for creating model instances in a way that
# abstracts out the translation logic a bit.
def create_story(title, structure_type=structure.DEFAULT_STRUCTURE,
                 summary='', call_to_action='', connected_prompt='',
                 language=settings.LANGUAGE_CODE,
                 *args, **kwargs):
    """Convenience function for creating a Story

    Allows for the creation of stories without having to explicitly
    deal with the translations.
    """
    story = Story(structure_type=structure_type, *args, **kwargs)
    story.save()
    # Persist the translated fields in a separate translation record.
    translation = StoryTranslation(story=story, title=title, summary=summary,
                                   call_to_action=call_to_action,
                                   connected_prompt=connected_prompt,
                                   language=language)
    translation.save()
    return story
def create_section(title, story, layout=None,
                   language=settings.LANGUAGE_CODE, *args, **kwargs):
    """Convenience function for creating a Section

    Allows for the creation of a section without having to explicitly
    deal with the translations.
    """
    section = Section(story=story, layout=layout, *args, **kwargs)
    section.save()
    # Persist the translated title in a separate translation record.
    SectionTranslation(section=section, title=title,
                       language=language).save()
    return section
def create_story_template(title, story, tag_line='', description='',
                          language=settings.LANGUAGE_CODE, *args, **kwargs):
    """Convenience function for creating a StoryTemplate

    Allows creating a template without explicitly dealing with the
    translations, mirroring ``create_story`` and ``create_section``.
    """
    obj = StoryTemplate(story=story, *args, **kwargs)
    obj.save()
    # BUG FIX: the ``language`` argument was previously accepted but never
    # passed to the translation, so non-default languages were silently
    # ignored (inconsistent with create_story/create_section).
    translation = StoryTemplateTranslation(story_template=obj,
        title=title, tag_line=tag_line, description=description,
        language=language)
    translation.save()
    return obj
|
|
import base64
import datetime
import gnupg
import httplib
import json
import os
import random
import requests
import sys
import tempfile
from ConfigParser import ConfigParser
from logging_settings import get_logger
from psycopg2 import IntegrityError
from subprocess import Popen, PIPE
from M2Crypto import DSA, BIO
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../local')
import local_settings
#logging settings
logger = get_logger()
class connect_error():
    ''' little stub so we can polymorph on the error '''
    # Mimics the only attribute of a requests.Response that callers
    # inspect (status_code), so connection failures flow through the
    # same error paths as HTTP errors.
    def __init__(self):
        self.status_code = 500
def get_host(cloud):
    ''' Return the API host for a cloud. '''
    # NOTE(review): ``cloud`` is unused; presumably a hook for per-cloud
    # hosts later -- verify before removing the parameter.
    return local_settings.API_HOST
def get_port(cloud):
    ''' Return the Nova proxy port for a cloud. '''
    return local_settings.NOVA_PROXY_PORT
def host_and_headers(cloud, project_id, auth_token):
    ''' Build the "host:port" string and the common HTTP headers used by
    every Nova proxy request. '''
    host = '%s:%s' % (get_host(cloud), get_port(cloud))
    headers = dict([
        ('Host', host),
        ('Accept', 'application/json'),
        ('User-Agent', 'python-novaclient'),
        ('Content-Type', 'application/json'),
        ('Accept-Encoding', 'gzip, deflate'),
        ('X-Auth-Project-Id', project_id),
        ('X-Auth-Token', auth_token),
    ])
    return host, headers
def get_instances(cloud, project_id, auth_token):
    ''' query the api for instances

    Returns the requests response from the servers/detail endpoint, or a
    connect_error stub (status_code 500) when the connection fails.
    '''
    host, headers = host_and_headers(cloud, project_id, auth_token)
    logger.info("%s", host)
    url = "http://%s/v2/%s/servers/detail" % (host, project_id)
    logger.info(url)
    try:
        response = requests.get(url, headers=headers)
        logger.info(response.text)
    except requests.exceptions.ConnectionError:
        logger.info("connection failed")
        return connect_error()
    return response
def get_instance_name(project_id, auth_token, cloud, instance_id, instances):
    ''' Find this instance's name

    Scans ``instances["servers"]`` for the entry matching both the
    instance id and the cloud id and returns its name, or None when no
    such instance exists.  (project_id and auth_token are unused but kept
    for call-site compatibility.)
    '''
    for instance in instances["servers"]:
        # BUG FIX: use .get() for "cloud_id" as well -- the original
        # guarded "id" with a membership test but indexed
        # instance["cloud_id"] directly, raising KeyError for servers
        # that lack that key.
        if (instance.get("id") == instance_id
                and instance.get("cloud_id") == cloud):
            return instance["name"]
    return None
def node_delete_request(cloud, project_id, auth_token, node_id):
    ''' delete the node

    Issues a DELETE against the v1.1 servers endpoint and returns the
    response, or a connect_error stub when the connection fails.
    '''
    host, headers = host_and_headers(cloud, project_id, auth_token)
    logger.info("%s", host)
    url = 'http://%s/v1.1/%s/servers/%s' % (host, project_id, node_id)
    try:
        response = requests.delete(url, headers=headers)
    except requests.exceptions.ConnectionError:
        return connect_error()
    logger.debug("tried to terminate instance %s", node_id)
    logger.debug(response)
    return response
def node_launch_request(cloud, project_id, auth_token, node_object):
    ''' Do the node launching stuff.

    POSTs ``node_object`` (a Nova "server" create payload) to the proxy
    and returns the requests response, or a connect_error stub when the
    connection fails.
    TODO: factor out the proxy style so that auth_multiple keys and this can
    both share it. '''
    host, headers = host_and_headers(cloud, project_id, auth_token)
    logger.info("%s", host)
    url = 'http://%s/v1.1/%s/servers' % (host, project_id)
    json_string = json.dumps(node_object)
    # NOTE(review): requests sets Content-Length itself; presumably the
    # proxy requires it explicitly -- verify before removing.
    headers['Content-Length'] = str(len(json_string))
    try:
        response = requests.post(url, headers=headers, data=json_string)
    except requests.exceptions.ConnectionError:
        # BUG FIX: corrected log-message typo ("probvlem" -> "problem").
        logger.info("we had a problem")
        return connect_error()
    return response
def flavor_request(cloud, project_id, auth_token, flavor):
    ''' Get details about this flavor

    Returns the requests response, or a connect_error stub when the
    connection fails.
    '''
    host, headers = host_and_headers(cloud, project_id, auth_token)
    logger.info("%s", host)
    url = 'http://%s/v2/%s/flavors/%s' % (host, project_id, flavor)
    try:
        response = requests.get(url, headers=headers)
    except requests.exceptions.ConnectionError:
        return connect_error()
    return response
def get_cores(cloud, project_id, auth_token, flavor):
    ''' Get the number of cores this flavor has

    NOTE(review): implicitly returns None when the flavor request does
    not return 200 -- callers appear to interpolate the result into user
    data, so verify they tolerate None.
    '''
    #TODO: need to get all flavors and look through them for reliable ...
    response = flavor_request(cloud, project_id, auth_token, flavor)
    if response.status_code == 200:
        flavor_details = json.loads(response.text)
        return flavor_details["flavor"]["vcpus"]
def get_user_data(file_name, format_dict):
    ''' Read file in same dir and format with the dict then b64 encode'''
    # Resolve the template relative to this module, not the CWD.
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        file_name)
    with open(path) as script_file:
        template = script_file.read()
    rendered = template % format_dict
    logger.debug(rendered)
    return base64.b64encode(rendered)
def run_ssh_on_string(command, string):
    ''' Run a shell command over data supplied via a temporary file.

    ``command`` must contain a single ``%s`` placeholder, which is filled
    in with the temp file's path.  Returns the command's stdout.
    '''
    temp = tempfile.NamedTemporaryFile(delete=False)
    try:
        temp.write(string)
        temp.close()
        process = Popen(command % temp.name, stdout=PIPE, shell=True)
        # BUG FIX: the original called os.waitpid() before reading stdout,
        # which can deadlock once the child fills the pipe buffer;
        # communicate() drains stdout while waiting for the child to exit.
        # (The unused ``exit_code`` variable is gone too.)
        output = process.communicate()[0]
    finally:
        # Always remove the temp file, even if the command fails.
        os.unlink(temp.name)
    return output
def generate_keypair(password=None):
    """Generate a 1024-bit DSA keypair.

    Returns ``(public_key, private_key)`` where the private key is PEM
    (optionally encrypted with ``password``) and the public key is
    converted to OpenSSH format via ssh-keygen.
    """
    dsa = DSA.gen_params(1024, os.urandom)
    mem_pub = BIO.MemoryBuffer()
    mem_private = BIO.MemoryBuffer()
    dsa.gen_key()
    if password is None:
        # Unencrypted PEM private key.
        dsa.save_key_bio(mem_private, cipher=None)
    else:
        # M2Crypto invokes the callback to obtain the passphrase.
        dsa.save_key_bio(mem_private, callback=lambda _: password)
    private_key = mem_private.getvalue()
    dsa.save_pub_key_bio(mem_pub)
    # ssh-keygen -i -m PKCS8 converts the PEM public key to the OpenSSH
    # one-line format; the trailing newline is stripped ([:-1]).
    public_key = run_ssh_on_string(local_settings.SSH_KEYGEN_COMMAND + " -f %s -i -m PKCS8",
                                   mem_pub.getvalue())[:-1]
    return public_key, private_key
def launch_instances(project_id, auth_token, cloud, image, flavor, number,
        cluster_id, username, cloud_auth_token, keyname):
    ''' Launch a tiny headnode and number compute nodes with flavor and image

    Returns 200 on success, otherwise the failing HTTP status code.  On a
    compute-node launch failure, any already-launched nodes are deleted.
    '''
    cores = get_cores(cloud, project_id, auth_token, flavor)
    # One keypair is shared between the head node and the compute nodes so
    # they can reach each other over SSH.
    public_key, private_key = generate_keypair()
    head_node_user_data = get_user_data("torque_server.py",
        {"username": username, "cluster_id": cluster_id, "nodes": number,
         "host": local_settings.clouds[cloud]["nova_host"],
         "port": local_settings.clouds[cloud]["nova_port"],
         "auth_token": cloud_auth_token, "tenant_id": project_id,
         "pdc": "True" if local_settings.clouds[cloud]["torque"]["pdc"] else "False",
         "cores": cores,
         "setup_dir": local_settings.clouds[cloud]["torque"]["setup_dir"],
         "public_key": public_key, "private_key": private_key,
         "headnode_script":
             local_settings.clouds[cloud]["torque"]["headnode_script"]})
    head_node = {
        "server": {
            "name": "%s-torque-headnode-%s" % (cloud, cluster_id),
            "flavorRef": 3,
            "imageRef": local_settings.clouds[cloud]["torque"]["headnode_image"],
            "max_count": 1,
            "min_count": 1,
            "user_data": head_node_user_data,
            "security_groups": [{"name": "default"}]
        }
    }
    if keyname is not None:
        head_node["server"]["key_name"] = keyname
    # NOTE(review): Nova's server-create normally answers 202 Accepted;
    # this code treats anything but 200 as failure -- verify against the
    # proxy's actual behavior.
    response = node_launch_request(cloud, project_id, auth_token, head_node)
    if response.status_code != 200:
        return response.status_code
    head_node_response = json.loads(response.text)
    # Track launched server ids so we can clean up on a later failure.
    node_ids = [head_node_response["server"]["id"]]
    compute_node_user_data = get_user_data("torque-node.sh",
        {"username": username, "cluster_id": cluster_id,
         "pdc": "true" if local_settings.clouds[cloud]["torque"]["pdc"] else "false",
         "setup_dir": local_settings.clouds[cloud]["torque"]["setup_dir"],
         "public_key": public_key, "private_key": private_key,
         "node_script": local_settings.clouds[cloud]["torque"]["node_script"]})
    # All compute nodes are launched in a single request via min/max_count.
    compute_node = {
        "server": {
            "name": "%s-torque-node-%s" % (cloud, cluster_id),
            "flavorRef": flavor,
            "imageRef": image,
            "max_count": number,
            "min_count": number,
            "user_data": compute_node_user_data,
            "security_groups": [{"name": "default"}]
        }
    }
    if keyname is not None:
        compute_node["server"]["key_name"] = keyname
    response = node_launch_request(cloud, project_id, auth_token, compute_node)
    if response.status_code != 200:
        logger.debug(response.status_code)
        logger.debug(response.text)
        logger.debug("Couldn't launch instances")
        logger.debug("going to kill and nova dont care")
        # terminate all the previously launched instances
        for node_id in node_ids:
            node_delete_request(cloud, project_id, auth_token, node_id)
        return response.status_code
    node_response = json.loads(response.text)
    # BUG FIX: the original appended a one-element *list* into the flat
    # node_ids list ("node_ids.append([...])"), which would have passed a
    # list as node_id had the cleanup loop ever run after this point.
    node_ids.append(node_response["server"]["id"])
    return 200
def launch_cluster(project_id, auth_token, cloud, username, image, flavor,
                   number, cloud_auth_token, keyname=None):
    ''' Main cluster launch method. Launches the instances needed for the
    cluster then dispatches the request to the cluster service running on
    the cloud headnode where it can run the specialized boot up services.
    TODO: There might actually be no reason to have a centralized cluster
    service we can use -f to tell the each node in the cluster exactly what
    it needs to do.

    Returns a JSON string: a fake "servers" list on success, or an error
    object when not all nodes could be created.
    '''
    logger.debug("launching cluster")
    # sys.maxint is Python 2 only; the zero-padding guarantees at least
    # 8 characters for the [-8:] slice below.
    rand_base = "0000000%s" % random.randrange(sys.maxint)
    date = datetime.datetime.now()
    # underscore is not allowed in a hostname
    cluster_id = "%s-%s" % (rand_base[-8:], date.strftime("%m-%d-%y"))
    status = launch_instances(project_id, auth_token, cloud, image, flavor,
        number, cluster_id, username, cloud_auth_token, keyname)
    logger.debug(status)
    if status != 200:
        return '{"message": "Not all nodes could be created.", "code": 409}'
    return json.dumps({"servers": [ {"id": ""} for i in range(int(number)) ] })
def delete_cluster(project_id, auth_token, cloud, username, instance_id):
    ''' Find the name of the instance then all instances with that same name
    and the headnode and delete all of those instances. '''
    logger.debug("deleting cluster")
    # Strip the "cluster<cloud>-" prefix to recover the raw server id.
    real_id = instance_id[len("cluster" + cloud) + 1:]
    instances = json.loads(get_instances(cloud, project_id, auth_token).text)
    name = get_instance_name(project_id, auth_token, cloud, real_id, instances)
    logger.info("the name %s", name)
    # Cluster id is everything after the first two hyphen-separated parts.
    torque_id = "-".join(name.split("-")[2:])
    error = False
    for instance in [i for i in instances["servers"]
                     if "name" in i and "-" in i["name"]]:
        base = "-".join(instance["name"].split("-")[:2])
        name_id = "-".join(instance["name"].split("-")[2:])
        # NOTE(review): launch_instances names servers
        # "<cloud>-torque-node-<id>", so ``base`` is "<cloud>-torque" and
        # these comparisons look like they can never match unless names
        # actually start with "torque-..." -- verify against real server
        # names before relying on this cleanup.
        if (base == "torque-node" or base == "torque-headnode") and \
                name_id == torque_id:
            logger.debug("deleting %s %s", instance["id"], instance["name"])
            response = node_delete_request(cloud, project_id, auth_token, instance["id"])
            if response.status_code != 200:
                error = True
                logger.debug(response.text)
    if error:
        return '{"message": "Not all nodes could be deleted.", "code": 409}'
    return '{"server": []}'
def main():
    ''' Command-line entry point (Python 2): dispatch to launch_cluster or
    delete_cluster based on the number of arguments supplied. '''
    logger.debug("in main")
    # 8 user arguments: launch a cluster without an explicit keypair name.
    if len(sys.argv) == 9:
        print launch_cluster(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
            sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8])
    # 9 user arguments: launch a cluster with an explicit keypair name.
    if len(sys.argv) == 10:
        print launch_cluster(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
            sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9])
    # 5 user arguments: delete an existing cluster.
    elif len(sys.argv) == 6:
        print delete_cluster(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
            sys.argv[5])
if __name__ == "__main__":
    main()
|
|
import json
import optparse
import os
import sqlite3
import sys
# Schema version written into the SQLite `meta` table; consumers can check
# it for compatibility before reading the database.
version = "0.5.0"
# Module-level counter used to assign an incremental `member_id` to each
# parsed gene (see add_gene_to_dict).
gene_count = 0
def asbool(val):
    """Coerce *val* to a bool.

    Strings 'true'/'1' (case-insensitive, surrounding whitespace ignored)
    map to True and 'false'/'0' to False; any other string raises
    ValueError. Non-string values use ordinary truthiness via bool().
    """
    if not isinstance(val, str):
        return bool(val)
    normalized = val.strip().lower()
    if normalized in ('true', '1'):
        return True
    if normalized in ('false', '0'):
        return False
    raise ValueError(f"Cannot convert {val} to bool")
class Sequence:
    """One FASTA record: a '>' header line plus its sequence lines."""
    def __init__(self, header, sequence_parts):
        self.header = header
        # The sequence split into its original file lines.
        self.sequence_parts = sequence_parts
        # Cache for the joined sequence, built lazily on first access.
        self._sequence = None
    @property
    def sequence(self):
        """The full sequence as a single string (joined once, then cached)."""
        if self._sequence is None:
            self._sequence = ''.join(self.sequence_parts)
        return self._sequence
    def print(self, fh=sys.stdout):
        """Write this record to *fh* in FASTA format, one line per part."""
        for text_line in [self.header] + self.sequence_parts:
            print(text_line, file=fh)
def FASTAReader_gen(fasta_filename):
    """Yield a Sequence for each '>'-headed record in *fasta_filename*."""
    with open(fasta_filename) as fasta_file:
        current = fasta_file.readline()
        while current:
            assert current.startswith('>'), "FASTA headers must start with >"
            header = current.rstrip()
            parts = []
            current = fasta_file.readline()
            # Accumulate sequence lines until the next header (or EOF).
            while current and not current.startswith('>'):
                parts.append(current.rstrip())
                current = fasta_file.readline()
            yield Sequence(header, parts)
def create_tables(conn):
    """Create the SQLite schema on *conn*: a `meta` table holding the schema
    version, a `gene` table (one row per gene, including the full gene dict
    as JSON), a `transcript` table, and a temporary convenience view joining
    the two. Commits at the end.
    """
    cur = conn.cursor()
    cur.execute('''CREATE TABLE meta (
        version VARCHAR PRIMARY KEY NOT NULL)''')
    cur.execute('INSERT INTO meta (version) VALUES (?)',
                (version, ))
    cur.execute('''CREATE TABLE gene (
        gene_id VARCHAR PRIMARY KEY NOT NULL,
        gene_symbol VARCHAR,
        seq_region_name VARCHAR NOT NULL,
        seq_region_start INTEGER NOT NULL,
        seq_region_end INTEGER NOT NULL,
        seq_region_strand INTEGER NOT NULL,
        species VARCHAR NOT NULL,
        biotype VARCHAR,
        gene_json VARCHAR NOT NULL)''')
    # Symbol lookups are a common query path, hence the index.
    cur.execute('CREATE INDEX gene_symbol_index ON gene (gene_symbol)')
    cur.execute('''CREATE TABLE transcript (
        transcript_id VARCHAR PRIMARY KEY NOT NULL,
        transcript_symbol VARCHAR,
        protein_id VARCHAR UNIQUE,
        protein_sequence VARCHAR,
        biotype VARCHAR,
        is_canonical BOOLEAN NOT NULL DEFAULT FALSE,
        gene_id VARCHAR NOT NULL REFERENCES gene(gene_id))''')
    # The following temporary view is not used in GAFA, so schema changes to it
    # don't require a meta version upgrade.
    cur.execute('''CREATE TEMPORARY VIEW transcript_join_gene AS
        SELECT transcript_id, transcript_symbol, COALESCE(transcript.biotype, gene.biotype) AS biotype, is_canonical, gene_id, gene_symbol, seq_region_name, species
        FROM transcript JOIN gene
        USING (gene_id)''')
    conn.commit()
def fetch_transcript_and_gene(conn, transcript_id):
    """Return the transcript_join_gene row for *transcript_id*, or None."""
    query = 'SELECT * FROM transcript_join_gene WHERE transcript_id=?'
    cursor = conn.cursor()
    cursor.execute(query,
                   (transcript_id, ))
    return cursor.fetchone()
def remove_type_from_list_of_ids(ids):
    """Strip the 'type:' prefix from each id in a comma-separated list."""
    stripped = [remove_type_from_id(part) for part in ids.split(',')]
    return ','.join(stripped)
def remove_type_from_id(id_):
    """Return *id_* without a leading 'type:' prefix, if one is present.

    Only the text up to the first ':' is treated as the prefix.
    """
    _, sep, rest = id_.partition(':')
    return rest if sep else id_
def feature_to_dict(cols, parent_dict=None):
    """Convert one GFF3 line (already split into its 9 columns) to a dict.

    Column-9 attributes become dict entries; 'ID' is renamed to 'id' (type
    prefix removed), 'Parent' ids get their type prefixes removed, and
    'representative' is renamed to 'is_canonical'. Column 7 is mapped to
    d['strand'] as 1/-1. When *parent_dict* is given and the feature has
    parents, the feature dict is appended to each parent's list.

    Raises an Exception on an unrecognized strand character.
    """
    d = {
        'end': int(cols[4]),
        'start': int(cols[3]),
    }
    for attr in cols[8].split(';'):
        if '=' in attr:
            # Split on the first '=' only: attribute values may themselves
            # contain '=' characters, which previously raised ValueError.
            (tag, value) = attr.split('=', 1)
            if tag == 'ID':
                tag = 'id'
                value = remove_type_from_id(value)
            elif tag == 'Parent':
                value = remove_type_from_list_of_ids(value)
            elif tag == 'representative':
                tag = 'is_canonical'
            d[tag] = value
    if cols[6] == '+':
        d['strand'] = 1
    elif cols[6] == '-':
        d['strand'] = -1
    else:
        raise Exception(f"Unrecognized strand: {cols[6]}")
    if parent_dict is not None and 'Parent' in d:
        # a 3' UTR can be split among multiple exons
        # a 5' UTR can be split among multiple exons
        # a CDS can be part of multiple transcripts
        for parent in d['Parent'].split(','):
            parent_dict.setdefault(parent, []).append(d)
    return d
def add_gene_to_dict(cols, species, gene_dict):
    """Parse a GFF3 'gene' line into a gene dict and store it in *gene_dict*.

    Assigns an incremental global `member_id`, records the region/species,
    and initializes an empty 'Transcript' list that join_dicts() later fills.

    Raises an Exception when the line has no usable ID attribute.
    """
    global gene_count
    gene = feature_to_dict(cols)
    # Use .get(): a line without an ID attribute yields a dict with no 'id'
    # key at all, so gene['id'] would raise a bare KeyError instead of the
    # informative error below.
    if not gene.get('id'):
        raise Exception(f"Id not found among column 9 attribute tags: {cols[8]}")
    gene.update({
        'member_id': gene_count,
        'object_type': 'Gene',
        'seq_region_name': cols[0],
        'species': species,
        'Transcript': [],
        'display_name': gene.get('Name'),
    })
    gene_dict[gene['id']] = gene
    gene_count = gene_count + 1
def add_transcript_to_dict(cols, species, transcript_dict):
    """Parse a GFF3 mRNA/transcript line and register it keyed by its id."""
    transcript = feature_to_dict(cols)
    extras = {
        'object_type': 'Transcript',
        'seq_region_name': cols[0],
        'species': species,
        'display_name': transcript.get('Name'),
    }
    transcript.update(extras)
    transcript_dict[transcript['id']] = transcript
def add_exon_to_dict(cols, species, exon_parent_dict):
    """Parse a GFF3 'exon' line and attach it to its parent transcript(s)."""
    exon = feature_to_dict(cols, exon_parent_dict)
    exon['length'] = int(cols[4]) - int(cols[3]) + 1
    exon['object_type'] = 'Exon'
    exon['seq_region_name'] = cols[0]
    exon['species'] = species
    # Some GFF3 files give exons only a Name; fall back to it as the id.
    if 'id' not in exon and 'Name' in exon:
        exon['id'] = exon['Name']
def add_cds_to_dict(cols, cds_parent_dict):
    """Parse a GFF3 'CDS' line and attach it to its parent transcript(s).

    A CDS without an ID falls back to its Name, or — when it has exactly
    one parent — to that parent's id.
    """
    cds = feature_to_dict(cols, cds_parent_dict)
    if 'id' in cds:
        return
    if 'Name' in cds:
        cds['id'] = cds['Name']
    elif 'Parent' in cds and ',' not in cds['Parent']:
        cds['id'] = cds['Parent']
def join_dicts(gene_dict, transcript_dict, exon_parent_dict, cds_parent_dict, five_prime_utr_parent_dict, three_prime_utr_parent_dict):
    """Stitch the per-feature dicts into the nested Ensembl-style structure.

    Exons are sorted by start and attached to their transcripts; CDS lists
    become a 'Translation' entry on the transcript; 5'/3' UTRs are used to
    derive (or validate, when a CDS exists) the translation start/end;
    finally transcripts are appended to their parent genes' 'Transcript'
    lists. Raises an Exception on multiple CDS IDs per transcript or on a
    CDS/UTR overlap.
    """
    # Attach exons (sorted by genomic start) to their transcripts.
    for parent, exon_list in exon_parent_dict.items():
        if parent in transcript_dict:
            exon_list.sort(key=lambda _: _['start'])
            transcript_dict[parent]['Exon'] = exon_list
    for transcript_id, transcript in transcript_dict.items():
        # Default translation spans the whole transcript until narrowed
        # by CDS and/or UTR information below.
        translation = {
            'CDS': [],
            'id': None,
            'end': transcript['end'],
            'object_type': 'Translation',
            'species': transcript['species'],
            'start': transcript['start'],
        }
        found_cds = False
        derived_translation_start = None
        derived_translation_end = None
        if transcript_id in cds_parent_dict:
            cds_list = cds_parent_dict[transcript_id]
            cds_ids = {cds['id'] for cds in cds_list}
            unique_cds_ids = {cds['id'] for cds in cds_list} if False else cds_ids
            unique_cds_ids = cds_ids
            if len(unique_cds_ids) > 1:
                msg = f"""Found multiple CDS IDs ({unique_cds_ids}) for transcript '{transcript_id}'.
This is not supported by the Ensembl JSON format. If a CDS is split across
multiple discontinuous genomic locations, the GFF3 standard requires that all
corresponding lines use the same ID attribute."""
                raise Exception(msg)
            cds_id = unique_cds_ids.pop()
            translation['id'] = cds_id
            cds_list.sort(key=lambda _: _['start'])
            translation['CDS'] = cds_list
            translation['start'] = cds_list[0]['start']
            translation['end'] = cds_list[-1]['end']
            found_cds = True
        # UTRs imply translation boundaries: on the + strand the 5' UTR ends
        # just before the translation start; strands are mirrored otherwise.
        if transcript_id in five_prime_utr_parent_dict:
            five_prime_utr_list = five_prime_utr_parent_dict[transcript_id]
            five_prime_utr_list.sort(key=lambda _: _['start'])
            if transcript['strand'] == 1:
                derived_translation_start = five_prime_utr_list[-1]['end'] + 1
            else:
                derived_translation_end = five_prime_utr_list[0]['start'] - 1
        if transcript_id in three_prime_utr_parent_dict:
            three_prime_utr_list = three_prime_utr_parent_dict[transcript_id]
            three_prime_utr_list.sort(key=lambda _: _['start'])
            if transcript['strand'] == 1:
                derived_translation_end = three_prime_utr_list[0]['start'] - 1
            else:
                derived_translation_start = three_prime_utr_list[-1]['end'] + 1
        # With a CDS present the UTR-derived bounds may only validate, never
        # override, the CDS bounds.
        if derived_translation_start is not None:
            if found_cds:
                if derived_translation_start > translation['start']:
                    raise Exception(f"Transcript {transcript_id} has the start of CDS {cds_id} overlapping with the UTR end")
            else:
                translation['start'] = derived_translation_start
        if derived_translation_end is not None:
            if found_cds:
                if derived_translation_end < translation['end']:
                    raise Exception(f"Transcript {transcript_id} has the end of CDS {cds_id} overlapping with the UTR start")
            else:
                translation['end'] = derived_translation_end
        if found_cds or derived_translation_start is not None or derived_translation_end is not None:
            transcript['Translation'] = translation
    for transcript in transcript_dict.values():
        if 'Parent' in transcript:
            # A polycistronic transcript can have multiple parents
            for parent in transcript['Parent'].split(','):
                if parent in gene_dict:
                    gene_dict[parent]['Transcript'].append(transcript)
def write_gene_dict_to_db(conn, gene_dict):
    """Insert every gene in *gene_dict* (and its transcripts) into the DB.

    Genes with a 'confidence' attribute other than 'high' are skipped with
    a message on stderr. The full gene dict is stored as JSON alongside the
    indexed columns. Commits once at the end.
    """
    cur = conn.cursor()
    for gene in gene_dict.values():
        if gene is None:
            # This can happen when loading a JSON file from Ensembl
            continue
        if 'confidence' in gene and gene['confidence'].lower() != 'high':
            print("Gene {} has confidence {} (not high), discarding".format(gene['id'], gene['confidence']), file=sys.stderr)
            continue
        gene_id = gene['id']
        cur.execute('INSERT INTO gene (gene_id, gene_symbol, seq_region_name, seq_region_start, seq_region_end, seq_region_strand, species, biotype, gene_json) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
                    (gene_id, gene.get('display_name'), gene['seq_region_name'], gene['start'], gene['end'], gene['strand'], gene['species'], gene.get('biotype'), json.dumps(gene)))
        if "Transcript" in gene:
            for transcript in gene["Transcript"]:
                transcript_id = transcript['id']
                transcript_symbol = transcript.get('display_name')
                protein_id = transcript.get('Translation', {}).get('id')
                biotype = transcript.get('biotype')
                is_canonical = asbool(transcript.get('is_canonical', False))
                to_insert = (transcript_id, transcript_symbol, protein_id, biotype, is_canonical, gene_id)
                try:
                    cur.execute('INSERT INTO transcript (transcript_id, transcript_symbol, protein_id, biotype, is_canonical, gene_id) VALUES (?, ?, ?, ?, ?, ?)',
                                to_insert)
                except Exception as e:
                    # Re-raise with the offending row attached so duplicate
                    # transcript/protein ids are easy to track down.
                    raise Exception(f"Error while inserting {to_insert} into transcript table: {e}")
    conn.commit()
def remove_id_version(s, force=False):
    """
    Remove the optional '.VERSION' suffix from an id when it is an Ensembl
    id (prefix 'ENS') or when `force` is True; otherwise return it as-is.
    """
    strip_version = force or s.startswith('ENS')
    return s.partition('.')[0] if strip_version else s
def __main__():
    """Command-line entry point.

    Parses GFF3 and/or Ensembl JSON gene feature files into a SQLite
    database (see create_tables), then filters and optionally renames the
    given FASTA CDS files against that database, writing kept sequences to
    --of and region-filtered ones to --ff.
    """
    parser = optparse.OptionParser()
    parser.add_option('--gff3', action='append', default=[], help='GFF3 file to convert, in SPECIES:FILENAME format. Use multiple times to add more files')
    parser.add_option('--json', action='append', default=[], help='JSON file to merge. Use multiple times to add more files')
    parser.add_option('--fasta', action='append', default=[], help='Path of the input FASTA files')
    parser.add_option('--filter', type='choice', choices=['canonical', 'coding', ''], default='', help='Which transcripts to keep')
    parser.add_option('--headers', type='choice',
                      choices=['TranscriptId_species', 'TranscriptID-GeneSymbol_species', 'TranscriptID-TranscriptSymbol_species', ''],
                      default='', help='Change the header line of the FASTA sequences to this format')
    parser.add_option('--regions', default="", help='Comma-separated list of region IDs for which FASTA sequences should be filtered')
    parser.add_option('-o', '--output', help='Path of the output SQLite file')
    parser.add_option('--of', help='Path of the output FASTA file')
    parser.add_option('--ff', default=os.devnull, help='Path of the filtered sequences output FASTA file')
    options, args = parser.parse_args()
    if args:
        raise Exception('Use options to provide inputs')
    conn = sqlite3.connect(options.output)
    conn.row_factory = sqlite3.Row
    conn.execute('PRAGMA foreign_keys = ON')
    create_tables(conn)
    # --- Phase 1a: parse each GFF3 input into the database ---
    for gff3_arg in options.gff3:
        try:
            (species, filename) = gff3_arg.split(':')
        except ValueError:
            raise Exception(f"Argument for --gff3 '{gff3_arg}' is not in the SPECIES:FILENAME format")
        gene_dict = dict()
        transcript_dict = dict()
        exon_parent_dict = dict()
        cds_parent_dict = dict()
        five_prime_utr_parent_dict = dict()
        three_prime_utr_parent_dict = dict()
        unimplemented_feature_nlines_dict = dict()
        with open(filename) as f:
            for i, line in enumerate(f, start=1):
                line = line.strip()
                if not line:
                    # skip empty lines
                    continue
                if line[0] == '#':
                    # skip comment lines
                    continue
                cols = line.split('\t')
                if len(cols) != 9:
                    # NOTE(review): the literal '(unknown)' in the messages in
                    # this function looks like a lost filename interpolation —
                    # confirm against the upstream version of this script.
                    raise Exception(f"Line {i} in file '(unknown)': '{line}' does not have 9 columns")
                feature_type = cols[2]
                try:
                    if feature_type == 'gene':
                        add_gene_to_dict(cols, species, gene_dict)
                    elif feature_type in ('mRNA', 'transcript'):
                        add_transcript_to_dict(cols, species, transcript_dict)
                    elif feature_type == 'exon':
                        add_exon_to_dict(cols, species, exon_parent_dict)
                    elif feature_type == 'five_prime_UTR':
                        feature_to_dict(cols, five_prime_utr_parent_dict)
                    elif feature_type == 'three_prime_UTR':
                        feature_to_dict(cols, three_prime_utr_parent_dict)
                    elif feature_type == 'CDS':
                        add_cds_to_dict(cols, cds_parent_dict)
                    elif feature_type in unimplemented_feature_nlines_dict:
                        unimplemented_feature_nlines_dict[feature_type] += 1
                    else:
                        unimplemented_feature_nlines_dict[feature_type] = 0
                except Exception as e:
                    # Report and keep parsing; a single bad line does not
                    # abort the whole file.
                    print(f"Line {i} in file '(unknown)': {e}", file=sys.stderr)
        for unimplemented_feature, nlines in unimplemented_feature_nlines_dict.items():
            print(f"Skipped {nlines} lines in GFF3 file '(unknown)': '{unimplemented_feature}' is not an implemented feature type", file=sys.stderr)
        join_dicts(gene_dict, transcript_dict, exon_parent_dict, cds_parent_dict, five_prime_utr_parent_dict, three_prime_utr_parent_dict)
        write_gene_dict_to_db(conn, gene_dict)
    # --- Phase 1b: merge pre-built Ensembl JSON inputs ---
    for json_arg in options.json:
        with open(json_arg) as f:
            write_gene_dict_to_db(conn, json.load(f))
    # Read the FASTA files a first time to:
    # - determine for each file if we need to force the removal of the version
    #   from the transcript id
    # - fill gene_transcripts_dict when keeping only the canonical transcripts
    force_remove_id_version_file_list = []
    gene_transcripts_dict = dict()
    for fasta_arg in options.fasta:
        force_remove_id_version = False
        found_gene_transcript = False
        for entry in FASTAReader_gen(fasta_arg):
            # Extract the transcript id by removing everything after the first space and then removing the version if needed
            transcript_id = remove_id_version(entry.header[1:].lstrip().split(' ')[0], force_remove_id_version)
            transcript = fetch_transcript_and_gene(conn, transcript_id)
            if not transcript and not found_gene_transcript:
                # We have not found a proper gene transcript in this file yet,
                # try to force the removal of the version from the transcript id
                transcript_id = remove_id_version(entry.header[1:].lstrip().split(' ')[0], True)
                transcript = fetch_transcript_and_gene(conn, transcript_id)
                # Remember that we need to force the removal for this file
                if transcript:
                    force_remove_id_version = True
                    force_remove_id_version_file_list.append(fasta_arg)
                    print(f"Forcing removal of id version in FASTA file '{fasta_arg}'", file=sys.stderr)
            if not transcript:
                print(f"Transcript '{transcript_id}' in FASTA file '{fasta_arg}' not found in the gene feature information", file=sys.stderr)
                continue
            if options.filter != 'canonical':
                # Only the canonical filter needs the per-gene survey below;
                # one recognized transcript is enough otherwise.
                break
            found_gene_transcript = True
            if len(entry.sequence) % 3 != 0:
                continue
            transcript_biotype = transcript['biotype']  # This is the biotype of the transcript or, if that is NULL, the one of the gene
            if transcript_biotype and transcript_biotype != 'protein_coding':
                continue
            gene_transcripts_dict.setdefault(transcript['gene_id'], []).append((transcript_id, transcript['is_canonical'], len(entry.sequence)))
    # --- Choose one transcript per gene for the canonical filter ---
    if options.filter == 'canonical':
        selected_transcript_ids = []
        for gene_id, transcript_tuples in gene_transcripts_dict.items():
            canonical_transcript_ids = [id_ for (id_, is_canonical, _) in transcript_tuples if is_canonical]
            if not canonical_transcript_ids:
                # Select the transcript with the longest sequence. If more than
                # one transcripts have the same longest sequence for a gene, the
                # first one to appear in the FASTA file is selected.
                selected_transcript_id = max(transcript_tuples, key=lambda transcript_tuple: transcript_tuple[2])[0]
            elif len(canonical_transcript_ids) > 1:
                raise Exception(f"Gene {gene_id} has more than 1 canonical transcripts")
            else:
                selected_transcript_id = canonical_transcript_ids[0]
            selected_transcript_ids.append(selected_transcript_id)
    regions = [_.strip().lower() for _ in options.regions.split(",")]
    # --- Phase 2: re-read the FASTA files and write the outputs ---
    with open(options.of, 'w') as output_fasta_file, open(options.ff, 'w') as filtered_fasta_file:
        for fasta_arg in options.fasta:
            force_remove_id_version = fasta_arg in force_remove_id_version_file_list
            for entry in FASTAReader_gen(fasta_arg):
                transcript_id = remove_id_version(entry.header[1:].lstrip().split(' ')[0], force_remove_id_version)
                transcript = fetch_transcript_and_gene(conn, transcript_id)
                if not transcript:
                    print(f"Transcript '{transcript_id}' in FASTA file '{fasta_arg}' not found in the gene feature information", file=sys.stderr)
                    continue
                if options.filter == 'canonical':
                    # We already filtered out non-protein-coding transcripts when populating gene_transcripts_dict
                    if transcript_id not in selected_transcript_ids:
                        continue
                elif options.filter == 'coding':
                    if len(entry.sequence) % 3 != 0:
                        print(f"Transcript '{transcript_id}' in FASTA file '{fasta_arg}' has a coding sequence length which is not multiple of 3, removing from FASTA output", file=sys.stderr)
                        continue
                    transcript_biotype = transcript['biotype']  # This is the biotype of the transcript or, if that is NULL, the one of the gene
                    if transcript_biotype and transcript_biotype != 'protein_coding':
                        print(f"Transcript {transcript_id} has biotype {transcript_biotype} (not protein-coding), removing from FASTA output", file=sys.stderr)
                        continue
                if options.headers == "TranscriptId_species":
                    # Change the FASTA header to '>TranscriptId_species', as required by TreeBest
                    # Remove any underscore in the species
                    entry.header = ">{}_{}".format(transcript_id, transcript['species'].replace('_', ''))
                elif options.headers == "TranscriptID-GeneSymbol_species":
                    # Remove any underscore in the species
                    entry.header = ">{}-{}_{}".format(transcript_id, transcript['gene_symbol'], transcript['species'].replace('_', ''))
                elif options.headers == "TranscriptID-TranscriptSymbol_species":
                    # Remove any underscore in the species
                    entry.header = ">{}-{}_{}".format(transcript_id, transcript['transcript_symbol'], transcript['species'].replace('_', ''))
                # Sequences from the listed regions go to the filtered file.
                if transcript['seq_region_name'].lower() in regions:
                    entry.print(filtered_fasta_file)
                else:
                    entry.print(output_fasta_file)
    conn.close()
if __name__ == '__main__':
    __main__()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from salt.ext.tornado import gen, locks
from salt.ext.tornado.gen import TimeoutError
from salt.ext.tornado.testing import gen_test, AsyncTestCase
from salt.ext.tornado.test.util import unittest, skipBefore35, exec_test
class ConditionTest(AsyncTestCase):
    """Tests for tornado.locks.Condition (vendored tornado test suite)."""
    def setUp(self):
        super(ConditionTest, self).setUp()
        # Ordered log of wait resolutions and notify calls.
        self.history = []
    def record_done(self, future, key):
        """Record the resolution of a Future returned by Condition.wait."""
        def callback(_):
            if not future.result():
                # wait() resolved to False, meaning it timed out.
                self.history.append('timeout')
            else:
                self.history.append(key)
        future.add_done_callback(callback)
    def test_repr(self):
        c = locks.Condition()
        self.assertIn('Condition', repr(c))
        self.assertNotIn('waiters', repr(c))
        c.wait()
        self.assertIn('waiters', repr(c))
    @gen_test
    def test_notify(self):
        c = locks.Condition()
        self.io_loop.call_later(0.01, c.notify)
        yield c.wait()
    def test_notify_1(self):
        c = locks.Condition()
        self.record_done(c.wait(), 'wait1')
        self.record_done(c.wait(), 'wait2')
        c.notify(1)
        self.history.append('notify1')
        c.notify(1)
        self.history.append('notify2')
        self.assertEqual(['wait1', 'notify1', 'wait2', 'notify2'],
                         self.history)
    def test_notify_n(self):
        c = locks.Condition()
        for i in range(6):
            self.record_done(c.wait(), i)
        c.notify(3)
        # Callbacks execute in the order they were registered.
        self.assertEqual(list(range(3)), self.history)
        c.notify(1)
        self.assertEqual(list(range(4)), self.history)
        c.notify(2)
        self.assertEqual(list(range(6)), self.history)
    def test_notify_all(self):
        c = locks.Condition()
        for i in range(4):
            self.record_done(c.wait(), i)
        c.notify_all()
        self.history.append('notify_all')
        # Callbacks execute in the order they were registered.
        self.assertEqual(
            list(range(4)) + ['notify_all'],
            self.history)
    @gen_test
    def test_wait_timeout(self):
        c = locks.Condition()
        wait = c.wait(timedelta(seconds=0.01))
        self.io_loop.call_later(0.02, c.notify)  # Too late.
        yield gen.sleep(0.03)
        self.assertFalse((yield wait))
    @gen_test
    def test_wait_timeout_preempted(self):
        c = locks.Condition()
        # This fires before the wait times out.
        self.io_loop.call_later(0.01, c.notify)
        wait = c.wait(timedelta(seconds=0.02))
        yield gen.sleep(0.03)
        yield wait  # No TimeoutError.
    @gen_test
    def test_notify_n_with_timeout(self):
        # Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
        # Wait for that timeout to expire, then do notify(2) and make
        # sure everyone runs. Verifies that a timed-out callback does
        # not count against the 'n' argument to notify().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        self.record_done(c.wait(), 3)
        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)
        c.notify(2)
        yield gen.sleep(0.01)
        self.assertEqual(['timeout', 0, 2], self.history)
        # NOTE(review): the duplicated assertion below is present in upstream
        # tornado as well; kept to stay aligned with upstream.
        self.assertEqual(['timeout', 0, 2], self.history)
        c.notify()
        self.assertEqual(['timeout', 0, 2, 3], self.history)
    @gen_test
    def test_notify_all_with_timeout(self):
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)
        c.notify_all()
        self.assertEqual(['timeout', 0, 2], self.history)
    @gen_test
    def test_nested_notify(self):
        # Ensure no notifications lost, even if notify() is reentered by a
        # waiter calling notify().
        c = locks.Condition()
        # Three waiters.
        futures = [c.wait() for _ in range(3)]
        # First and second futures resolved. Second future reenters notify(),
        # resolving third future.
        futures[1].add_done_callback(lambda _: c.notify())
        c.notify(2)
        self.assertTrue(all(f.done() for f in futures))
    @gen_test
    def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        c = locks.Condition()
        for _ in range(101):
            c.wait(timedelta(seconds=0.01))
        future = c.wait()
        self.assertEqual(102, len(c._waiters))
        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(c._waiters))
        # Final waiter is still active.
        self.assertFalse(future.done())
        c.notify()
        self.assertTrue(future.done())
class EventTest(AsyncTestCase):
    """Tests for tornado.locks.Event (vendored tornado test suite)."""
    def test_repr(self):
        event = locks.Event()
        self.assertTrue('clear' in str(event))
        self.assertFalse('set' in str(event))
        event.set()
        self.assertFalse('clear' in str(event))
        self.assertTrue('set' in str(event))
    def test_event(self):
        e = locks.Event()
        future_0 = e.wait()
        e.set()
        future_1 = e.wait()
        e.clear()
        future_2 = e.wait()
        # Waiters registered before set() and while set resolve; a waiter
        # registered after clear() does not.
        self.assertTrue(future_0.done())
        self.assertTrue(future_1.done())
        self.assertFalse(future_2.done())
    @gen_test
    def test_event_timeout(self):
        e = locks.Event()
        with self.assertRaises(TimeoutError):
            yield e.wait(timedelta(seconds=0.01))
        # After a timed-out waiter, normal operation works.
        self.io_loop.add_timeout(timedelta(seconds=0.01), e.set)
        yield e.wait(timedelta(seconds=1))
    def test_event_set_multiple(self):
        e = locks.Event()
        e.set()
        # A second set() is a harmless no-op.
        e.set()
        self.assertTrue(e.is_set())
    def test_event_wait_clear(self):
        e = locks.Event()
        f0 = e.wait()
        e.clear()
        f1 = e.wait()
        e.set()
        # clear() before set() must not drop earlier waiters.
        self.assertTrue(f0.done())
        self.assertTrue(f1.done())
class SemaphoreTest(AsyncTestCase):
    """Tests for tornado.locks.Semaphore (vendored tornado test suite)."""
    def test_negative_value(self):
        self.assertRaises(ValueError, locks.Semaphore, value=-1)
    def test_repr(self):
        sem = locks.Semaphore()
        self.assertIn('Semaphore', repr(sem))
        self.assertIn('unlocked,value:1', repr(sem))
        sem.acquire()
        self.assertIn('locked', repr(sem))
        self.assertNotIn('waiters', repr(sem))
        sem.acquire()
        self.assertIn('waiters', repr(sem))
    def test_acquire(self):
        sem = locks.Semaphore()
        f0 = sem.acquire()
        self.assertTrue(f0.done())
        # Wait for release().
        f1 = sem.acquire()
        self.assertFalse(f1.done())
        f2 = sem.acquire()
        sem.release()
        self.assertTrue(f1.done())
        self.assertFalse(f2.done())
        sem.release()
        self.assertTrue(f2.done())
        sem.release()
        # Now acquire() is instant.
        self.assertTrue(sem.acquire().done())
        self.assertEqual(0, len(sem._waiters))
    @gen_test
    def test_acquire_timeout(self):
        sem = locks.Semaphore(2)
        yield sem.acquire()
        yield sem.acquire()
        acquire = sem.acquire(timedelta(seconds=0.01))
        self.io_loop.call_later(0.02, sem.release)  # Too late.
        yield gen.sleep(0.3)
        with self.assertRaises(gen.TimeoutError):
            yield acquire
        sem.acquire()
        f = sem.acquire()
        self.assertFalse(f.done())
        sem.release()
        self.assertTrue(f.done())
    @gen_test
    def test_acquire_timeout_preempted(self):
        sem = locks.Semaphore(1)
        yield sem.acquire()
        # This fires before the wait times out.
        self.io_loop.call_later(0.01, sem.release)
        acquire = sem.acquire(timedelta(seconds=0.02))
        yield gen.sleep(0.03)
        yield acquire  # No TimeoutError.
    def test_release_unacquired(self):
        # Unbounded releases are allowed, and increment the semaphore's value.
        sem = locks.Semaphore()
        sem.release()
        sem.release()
        # Now the counter is 3. We can acquire three times before blocking.
        self.assertTrue(sem.acquire().done())
        self.assertTrue(sem.acquire().done())
        self.assertTrue(sem.acquire().done())
        self.assertFalse(sem.acquire().done())
    @gen_test
    def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        sem = locks.Semaphore(value=0)
        futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]
        future = sem.acquire()
        self.assertEqual(102, len(sem._waiters))
        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(sem._waiters))
        # Final waiter is still active.
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())
        # Prevent "Future exception was never retrieved" messages.
        for future in futures:
            self.assertRaises(TimeoutError, future.result)
class SemaphoreContextManagerTest(AsyncTestCase):
    """Context-manager behavior of Semaphore (vendored tornado test suite)."""
    @gen_test
    def test_context_manager(self):
        sem = locks.Semaphore()
        with (yield sem.acquire()) as yielded:
            self.assertTrue(yielded is None)
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @skipBefore35
    @gen_test
    def test_context_manager_async_await(self):
        # Repeat the above test using 'async with'.
        sem = locks.Semaphore()
        namespace = exec_test(globals(), locals(), """
        async def f():
            async with sem as yielded:
                self.assertTrue(yielded is None)
        """)
        yield namespace['f']()
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @gen_test
    def test_context_manager_exception(self):
        sem = locks.Semaphore()
        with self.assertRaises(ZeroDivisionError):
            with (yield sem.acquire()):
                1 / 0
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @gen_test
    def test_context_manager_timeout(self):
        sem = locks.Semaphore()
        with (yield sem.acquire(timedelta(seconds=0.01))):
            pass
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @gen_test
    def test_context_manager_timeout_error(self):
        sem = locks.Semaphore(value=0)
        with self.assertRaises(gen.TimeoutError):
            with (yield sem.acquire(timedelta(seconds=0.01))):
                pass
        # Counter is still 0.
        self.assertFalse(sem.acquire().done())
    @gen_test
    def test_context_manager_contended(self):
        # Two coroutines take turns; each must release before the next runs.
        sem = locks.Semaphore()
        history = []
        @gen.coroutine
        def f(index):
            with (yield sem.acquire()):
                history.append('acquired %d' % index)
                yield gen.sleep(0.01)
                history.append('release %d' % index)
        yield [f(i) for i in range(2)]
        expected_history = []
        for i in range(2):
            expected_history.extend(['acquired %d' % i, 'release %d' % i])
        self.assertEqual(expected_history, history)
    @gen_test
    def test_yield_sem(self):
        # Ensure we catch a "with (yield sem)", which should be
        # "with (yield sem.acquire())".
        with self.assertRaises(gen.BadYieldError):
            with (yield locks.Semaphore()):
                pass
    def test_context_manager_misuse(self):
        # Ensure we catch a "with sem", which should be
        # "with (yield sem.acquire())".
        with self.assertRaises(RuntimeError):
            with locks.Semaphore():
                pass
class BoundedSemaphoreTest(AsyncTestCase):
    """Tests for tornado.locks.BoundedSemaphore (vendored tornado tests)."""
    def test_release_unacquired(self):
        sem = locks.BoundedSemaphore()
        # Releasing above the initial value must fail for the bounded variant.
        self.assertRaises(ValueError, sem.release)
        # Value is 0.
        sem.acquire()
        # Block on acquire().
        future = sem.acquire()
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())
        # Value is 1.
        sem.release()
        self.assertRaises(ValueError, sem.release)
class LockTests(AsyncTestCase):
    """Tests for tornado.locks.Lock (vendored tornado test suite)."""
    def test_repr(self):
        lock = locks.Lock()
        # No errors.
        repr(lock)
        lock.acquire()
        repr(lock)
    def test_acquire_release(self):
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        future = lock.acquire()
        self.assertFalse(future.done())
        lock.release()
        self.assertTrue(future.done())
    @gen_test
    def test_acquire_fifo(self):
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        N = 5
        history = []
        @gen.coroutine
        def f(idx):
            with (yield lock.acquire()):
                history.append(idx)
        futures = [f(i) for i in range(N)]
        self.assertFalse(any(future.done() for future in futures))
        lock.release()
        yield futures
        # Waiters are woken in FIFO (registration) order.
        self.assertEqual(list(range(N)), history)
    @skipBefore35
    @gen_test
    def test_acquire_fifo_async_with(self):
        # Repeat the above test using `async with lock:`
        # instead of `with (yield lock.acquire()):`.
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        N = 5
        history = []
        namespace = exec_test(globals(), locals(), """
        async def f(idx):
            async with lock:
                history.append(idx)
        """)
        futures = [namespace['f'](i) for i in range(N)]
        lock.release()
        yield futures
        self.assertEqual(list(range(N)), history)
    @gen_test
    def test_acquire_timeout(self):
        lock = locks.Lock()
        lock.acquire()
        with self.assertRaises(gen.TimeoutError):
            yield lock.acquire(timeout=timedelta(seconds=0.01))
        # Still locked.
        self.assertFalse(lock.acquire().done())
    def test_multi_release(self):
        lock = locks.Lock()
        # release() without a matching acquire() is an error for Lock.
        self.assertRaises(RuntimeError, lock.release)
        lock.acquire()
        lock.release()
        self.assertRaises(RuntimeError, lock.release)
    @gen_test
    def test_yield_lock(self):
        # Ensure we catch a "with (yield lock)", which should be
        # "with (yield lock.acquire())".
        with self.assertRaises(gen.BadYieldError):
            with (yield locks.Lock()):
                pass
    def test_context_manager_misuse(self):
        # Ensure we catch a "with lock", which should be
        # "with (yield lock.acquire())".
        with self.assertRaises(RuntimeError):
            with locks.Lock():
                pass
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python3.8
'Calculations.'
import numpy as np
import cv2 as cv
from process_image import shape, odd, rotate
from images import Images
from angle import Angle
class Calculate():
'Calculate results.'
    def __init__(self, core, input_images):
        # Settings dict and image-output config taken from the core object.
        self.settings = core.settings.settings
        self.imgs = core.settings.images
        self.log = core.log
        self.results = core.results
        # Wrap the raw input images; our soil-z calculator is handed over
        # as a callback.
        self.images = Images(core, input_images, self.calculate_soil_z)
        # Current z position / direction / offset info from the image set.
        self.z_info = self.images._get_z_info()
        self.calculated_angle = 0
    def check_images(self):
        '''Check capture images.

        Logs an error for missing images, applies the optional pre-rotation,
        reduces each image's data, and logs an error when the detected
        coverage falls below the configured threshold.
        '''
        self.log.debug('Checking images...', verbosity=2)
        for images in self.images.input.values():
            for image in images:
                if image.image is None:
                    self.log.error('Image missing.')
                # Optionally rotate the capture before any analysis.
                pre_rotation_angle = self.settings['pre_rotation_angle']
                if pre_rotation_angle:
                    image.image = rotate(image.image, pre_rotation_angle)
                image.reduce_data()
                content = image.data.report
                self.log.debug(content['report'])
                # Reject images without enough detected detail to work with.
                if content['coverage'] < self.settings['input_coverage_threshold']:
                    self.log.error('Not enough detail. Check recent images.')
def _validate_calibration_data(self):
calibrated = {
'width': self.settings['calibration_image_width'],
'height': self.settings['calibration_image_height']}
current = shape(self.images.input['left'][0].image)
mismatch = {k: (v and v != current[k]) for k, v in calibrated.items()}
if any(mismatch.values()):
self.log.error('Image size must match calibration.')
def _z_at_dist(self, distance, z_reference=None):
if z_reference is None:
z_reference = self.z_info['current']
z_value = z_reference + self.z_info['direction'] * distance
return 0 if np.isnan(z_value) else int(z_value)
def calculate_soil_z(self, disparity_value):
'Calculate soil z from disparity value.'
calculated_soil_z = None
measured_distance = self.settings['measured_distance']
measured_at_z = self.settings['calibration_measured_at_z']
measured_soil_z = self._z_at_dist(measured_distance, measured_at_z)
disparity_offset = self.settings['calibration_disparity_offset']
calibration_factor = self.settings['calibration_factor']
current_z = self.z_info['current']
direction = self.z_info['direction']
values = {
'measured_distance': measured_distance,
'z_offset_from_measured': self.z_info['offset'],
'new_meas_dist': measured_distance - self.z_info['offset'],
'measured_at_z': measured_at_z,
'measured_soil_z': measured_soil_z,
'disparity_offset': disparity_offset,
'calibration_factor': calibration_factor,
'current_z': current_z,
'direction': direction,
'disparity': disparity_value,
'calculated_soil_z': calculated_soil_z,
}
calcs = [''] * 4
calcs[0] += f'({measured_soil_z = :<7}) = '
calcs[0] += f'({measured_at_z = :<7})'
calcs[0] += f' + {direction} * ({measured_distance = })'
if calibration_factor == 0:
return calculated_soil_z, {'lines': calcs, 'values': values}
self._validate_calibration_data()
disparity_delta = disparity_value - disparity_offset
distance = measured_distance - disparity_delta * calibration_factor
calculated_soil_z = self._z_at_dist(distance)
values['disparity_delta'] = round(disparity_delta, 4)
values['calc_distance'] = round(distance, 4)
values['calculated_soil_z'] = calculated_soil_z
calcs[1] += f'({disparity_delta = :<7.1f}) = '
calcs[1] += f'({disparity_value = :<7}) - ({disparity_offset = })'
calcs[2] += f'({distance = :<7.1f}) = '
calcs[2] += f'({measured_distance = :<7})'
calcs[2] += f' - ({disparity_delta = :.1f}) * ({calibration_factor = })'
calcs[3] += f'({calculated_soil_z = :<7}) = '
calcs[3] += f'({current_z = :<7}) + {direction} * ({distance = :.1f})'
return calculated_soil_z, {'lines': calcs, 'values': values}
def _from_stereo(self):
self.log.debug('Calculating disparity...', verbosity=2)
num_disparities = int(16 * self.settings['disparity_search_depth'])
block_size_setting = int(self.settings['disparity_block_size'])
block_size = min(max(5, odd(block_size_setting)), 255)
if block_size != block_size_setting:
self.settings['disparity_block_size'] = block_size
self.results.save_config('disparity_block_size')
stereo = cv.StereoBM().create(num_disparities, block_size)
disparities = []
for j, left_image in enumerate(self.images.input['left']):
for k, right_image in enumerate(self.images.input['right']):
left = left_image.preprocess()
right = right_image.preprocess()
result = stereo.compute(left, right)
multiple = len(self.images.input['left']) > 1
if multiple and self.imgs['multi_depth']:
tag = f'disparity_{j}_{k}'
self.images.output_init(result, tag, reduce=False)
self.images.output[tag].normalize()
self.images.output[tag].save(f'depth_map_bw_{j}_{k}')
disparities.append(result)
disparity_data = disparities[0]
for computed in disparities[1:]:
mask = disparity_data < self.settings['pixel_value_threshold']
disparity_data[mask] = computed[mask]
self.images.output_init(disparity_data, 'disparity_from_stereo')
def _from_flow(self):
self.log.debug('Calculating flow...')
flow = Angle(self.settings, self.log, self.images)
flow.calculate()
self.images.set_angle(flow.angle)
self.calculated_angle = flow.angle
disparity_from_flow = self.images.output['disparity_from_flow']
_soil_z_ff, details_ff = self.calculate_soil_z(
disparity_from_flow.data.reduced['stats']['mid'])
disparity_from_flow.data.report['calculations'] = details_ff
def calculate_disparity(self):
'Calculate and reduce disparity data.'
self._from_flow()
self._from_stereo()
output = self.images.output
output['raw_disparity'] = output.get('disparity_from_stereo')
if self.settings['use_flow']:
self.images.rotated = False
output['raw_disparity'] = output.get('disparity_from_flow')
if output['raw_disparity'] is None:
self.log.error('No algorithm chosen.')
disparity = self.images.filter_plants(output['raw_disparity'].image)
disparity[-1][-1] = self.settings['calibration_maximum']
self.images.output_init(disparity, 'disparity')
self._check_disparity()
def _check_disparity(self):
data = self.images.output['disparity'].data
if data.data.max() < 1:
msg = 'Zero disparity.'
self.save_debug_output()
self.log.error(msg)
percent_threshold = self.settings['disparity_percent_threshold']
if data.reduced['stats']['mid_size_p'] < percent_threshold:
msg = "Couldn't find surface."
self.save_debug_output()
self.log.error(msg)
def calculate(self):
'Calculate disparity, calibration factor, and soil height.'
self.check_images()
missing_measured_distance = self.settings['measured_distance'] == 0
missing_calibration_factor = self.settings['calibration_factor'] == 0
if missing_measured_distance and missing_calibration_factor:
self.log.error('Calibration measured distance input required.')
self.calculate_disparity()
self.disparity_debug_logs()
missing_disparity_offset = self.settings['calibration_disparity_offset'] == 0
if missing_disparity_offset:
self.set_disparity_offset()
elif missing_calibration_factor:
self.set_calibration_factor()
self.results.save_calibration()
details = {}
if not missing_disparity_offset:
disparity = self.images.output['disparity'].data.report
soil_z, details = self.calculate_soil_z(disparity['mid'])
if len(details['lines']) > 0:
self.log.debug('\n'.join(details['lines']))
disparity['calculations'] = details
low_soil_z, _ = self.calculate_soil_z(disparity['low'])
high_soil_z, _ = self.calculate_soil_z(disparity['high'])
soil_z_range_text = f'Soil z range: {low_soil_z} to {high_soil_z}'
self.log.debug(soil_z_range_text, verbosity=2)
disparity['calculations']['lines'].append(soil_z_range_text)
use_flow = self.settings['use_flow']
alt = 'disparity_from_stereo' if use_flow else 'disparity_from_flow'
disparity_alt = self.images.output.get(alt)
if disparity_alt is not None:
details_alt = disparity_alt.data.report.get('calculations')
if details_alt is not None:
soil_z_alt = details_alt['values']['calculated_soil_z']
msg = f'(alternate method would have calculated {soil_z_alt})'
self.log.debug(msg)
if missing_calibration_factor:
self.check_soil_z(details['values'])
self.results.save_soil_height(soil_z)
details['title'] = self.images.core.settings.title
details['method'] = 'flow' if self.settings['use_flow'] else 'stereo'
details['angle'] = self.calculated_angle
self.save_debug_output()
return details
def save_debug_output(self):
'Save debug output.'
self.images.save()
self.images.save_data()
self.results.save_report(self.images)
def check_soil_z(self, values):
'Verify soil z height is within expected range.'
calculated_soil_z = values['calculated_soil_z']
expected_soil_z = values['measured_soil_z']
if abs(calculated_soil_z - expected_soil_z) > 2:
error_message = 'Soil height calculation error: '
error_message += f'expected {expected_soil_z} got {calculated_soil_z}'
self.log.error(error_message)
def disparity_debug_logs(self):
'Send disparity debug logs.'
disparity = self.images.output['disparity'].data.report
value = disparity['mid']
coverage = disparity['coverage']
self.log.debug(disparity['report'])
self.log.debug(f'Average disparity: {value} {coverage}% coverage')
if coverage < self.settings['disparity_coverage_threshold']:
self.log.error('Not enough disparity information. Check images.')
def set_disparity_offset(self):
'Set disparity offset.'
self.log.debug('Saving disparity offset...')
disparity = self.images.output['disparity'].data
self.settings['calibration_disparity_offset'] = disparity.report['mid']
self.log.debug(f'z: {self.z_info}')
self.settings['calibration_measured_at_z'] = self.z_info['current']
img_size = shape(self.images.input['left'][0].image)
self.settings['calibration_image_width'] = img_size['width']
self.settings['calibration_image_height'] = img_size['height']
self.settings['calibration_maximum'] = int(disparity.data.max())
def set_calibration_factor(self):
'Set calibration_factor.'
self.log.debug('Calculating calibration factor...', verbosity=2)
disparity = self.images.output['disparity'].data.report['mid']
disparity_offset = self.settings['calibration_disparity_offset']
disparity_difference = disparity - disparity_offset
if disparity_difference == 0:
self.log.error('Zero disparity difference.')
if self.z_info['offset'] == 0:
self.log.debug(f'z: {self.z_info}')
self.log.error('Zero offset.')
factor = round(self.z_info['offset'] / disparity_difference, 4)
self.settings['calibration_factor'] = factor
|
|
# from future import standard_library
# standard_library.install_aliases()
try:
from builtins import object
except ImportError:
pass
import struct
import io
import logging
import zlib
import six
from Crypto import Random
from Crypto.Hash import SHA
from Crypto.Util.number import bytes_to_long
from Crypto.Util.number import long_to_bytes
from Crypto.Cipher import PKCS1_v1_5
from Crypto.Cipher import PKCS1_OAEP
from jwkest import b64d, as_bytes
from jwkest import b64e
from jwkest import JWKESTException
from jwkest import MissingKey
from jwkest.aes_gcm import AES_GCM
from jwkest.aes_key_wrap import aes_wrap_key
from jwkest.aes_key_wrap import aes_unwrap_key
from jwkest.ecc import NISTEllipticCurve
from jwkest.extra import aes_cbc_hmac_encrypt
from jwkest.extra import ecdh_derive_key
from jwkest.extra import aes_cbc_hmac_decrypt
from jwkest.jwk import intarr2str
from jwkest.jwk import ECKey
from jwkest.jws import JWx
from jwkest.jwt import JWT, b64encode_item
logger = logging.getLogger(__name__)

__author__ = 'rohe0002'

# Cipher direction flags (encrypt / decrypt).
ENC = 1
DEC = 0
class JWEException(JWKESTException):
    """Base class for all JWE errors raised by this module."""
    pass


class CannotDecode(JWEException):
    """The token could not be decoded at all."""
    pass


class NotSupportedAlgorithm(JWEException):
    """The requested "alg" value is not supported."""
    pass


class MethodNotSupported(JWEException):
    """The requested "enc" method is not supported."""
    pass


class ParameterError(JWEException):
    """A header/parameter value is invalid (e.g. unknown "zip")."""
    pass


class NoSuitableEncryptionKey(JWEException):
    """No usable key was found for encryption."""
    pass


class NoSuitableDecryptionKey(JWEException):
    """No usable key was found for decryption."""
    pass


class DecryptionFailed(JWEException):
    """Decryption or integrity verification failed."""
    pass


class WrongEncryptionAlgorithm(JWEException):
    """The token's "alg" does not match the caller's expectation."""
    pass
# ---------------------------------------------------------------------------
# Base class

# Derived-key lengths in bits for each supported "enc" algorithm
# (used by ECDH-ES key derivation).
KEYLEN = {
    "A128GCM": 128,
    "A192GCM": 192,
    "A256GCM": 256,
    "A128CBC-HS256": 256,
    "A192CBC-HS384": 384,
    "A256CBC-HS512": 512
}
class Encrypter(object):
    """Abstract base class for encryption algorithms.

    Instances only carry the ``with_digest`` flag, which controls
    whether subclasses append/verify a SHA digest on the message.
    Subclasses must override both ``encrypt`` and ``decrypt``.
    """

    def __init__(self, with_digest=False):
        # Whether to attach/check a SHA digest (used by RSAEncrypter).
        self.with_digest = with_digest

    def encrypt(self, msg, key):
        """Encrypt ``msg`` with ``key`` and return the encrypted message."""
        raise NotImplementedError

    def decrypt(self, msg, key):
        """Return decrypted message."""
        raise NotImplementedError
class RSAEncrypter(Encrypter):
    """RSA encryption/decryption supporting PKCS#1 v1.5 and OAEP padding."""

    def encrypt(self, msg, key, padding="pkcs1_padding"):
        """Encrypt *msg* under RSA *key* with the selected padding scheme."""
        if padding == "pkcs1_padding":
            cipher = PKCS1_v1_5.new(key)
            if self.with_digest:
                # Append a SHA digest so decrypt() can verify integrity.
                msg += SHA.new(msg).digest()
        elif padding == "pkcs1_oaep_padding":
            cipher = PKCS1_OAEP.new(key)
        else:
            raise Exception("Unsupported padding")
        return cipher.encrypt(msg)

    def decrypt(self, ciphertext, key, padding="pkcs1_padding"):
        """Decrypt *ciphertext*; raise DecryptionFailed on padding or
        digest verification failure."""
        if padding == "pkcs1_padding":
            cipher = PKCS1_v1_5.new(key)
            dsize = SHA.digest_size if self.with_digest else 0
            # PyCrypto returns the sentinel instead of raising when the
            # PKCS#1 v1.5 padding check fails; a random sentinel makes
            # that outcome detectable without being forgeable.
            sentinel = Random.new().read(32 + dsize)
            text = cipher.decrypt(ciphertext, sentinel)
            if dsize:
                _msg, _digest = text[:-dsize], text[-dsize:]
                if SHA.new(_msg).digest() != _digest:
                    raise DecryptionFailed()
                text = _msg
            elif text == sentinel:
                raise DecryptionFailed()
        elif padding == "pkcs1_oaep_padding":
            cipher = PKCS1_OAEP.new(key)
            text = cipher.decrypt(ciphertext)
        else:
            raise Exception("Unsupported padding")
        return text
# ---------------------------------------------------------------------------
def int2bigendian(n):
    """Return *n* as a list of four big-endian byte values (ints 0-255).

    Fix: the original did ``[ord(c) for c in struct.pack('>I', n)]``,
    which raises TypeError on Python 3 because iterating ``bytes``
    yields ints, not one-character strings.  ``bytearray`` yields ints
    on both Python 2 and 3, so this works everywhere.

    :param n: unsigned 32-bit integer.
    :raises struct.error: if *n* does not fit in 32 bits.
    """
    return list(bytearray(struct.pack('>I', n)))
def party_value(pv):
    """Encode a party-info value as a length-prefixed byte list.

    Empty/missing values encode as a four-zero length prefix with no
    payload, per the concat-KDF input format.
    """
    if not pv:
        return [0, 0, 0, 0]
    encoded = b64e(pv)
    out = int2bigendian(len(encoded))
    out.extend(encoded)
    return out
def _hash_input(cmk, enc, label, rond=1, length=128, hashsize=256,
                epu="", epv=""):
    """Assemble the concatenation-KDF hash input as a flat byte-value list.

    Layout: round counter, CMK bytes, key length, "enc" identifier,
    PartyU info, PartyV info, label.  ``hashsize`` is accepted for
    interface compatibility but not used here.
    """
    parts = [0, 0, 0, rond]
    parts += cmk
    parts += [0, 0, 0, length]
    parts += [ord(c) for c in enc]
    parts += party_value(epu)
    parts += party_value(epv)
    parts += label
    return parts
# ---------------------------------------------------------------------------
def cipher_filter(cipher, inf, outf):
    """Pump all data from *inf* through *cipher* into *outf*.

    Reads until *inf* is exhausted, writing ``cipher.update`` output as
    it goes, then flushes ``cipher.final``.  Returns the accumulated
    contents of *outf* (assumed StringIO-like, with ``getvalue``).
    """
    while True:
        chunk = inf.read()
        if not chunk:
            break
        outf.write(cipher.update(chunk))
    outf.write(cipher.final())
    return outf.getvalue()
def aes_enc(key, txt):
    """Encrypt *txt* with cipher object *key*, buffering via StringIO.

    The context managers close both buffers after the ciphertext has
    been extracted by ``cipher_filter`` (which calls ``getvalue``).
    """
    with io.StringIO(txt) as plain_buf, io.StringIO() as cipher_buf:
        return cipher_filter(key, plain_buf, cipher_buf)
def aes_dec(key, ciptxt):
    """Decrypt *ciptxt* with cipher object *key*, buffering via StringIO.

    Mirror of :func:`aes_enc` with the buffer roles swapped.
    """
    with io.StringIO() as plain_buf, io.StringIO(ciptxt) as cipher_buf:
        return cipher_filter(key, cipher_buf, plain_buf)
def keysize(spec):
    """Extract the key size in bits from an algorithm spec string.

    "HS256"/"CS384" style specs carry the size after a two-letter
    prefix; "A128..." specs carry it in characters 1-3.  Unknown
    prefixes yield 0.
    """
    for prefix, start, stop in (("HS", 2, None), ("CS", 2, None), ("A", 1, 4)):
        if spec.startswith(prefix):
            return int(spec[start:stop])
    return 0
# Map JWE "enc" identifiers to OpenSSL-style cipher names.
ENC2ALG = {"A128CBC": "aes_128_cbc", "A192CBC": "aes_192_cbc",
           "A256CBC": "aes_256_cbc"}

# Algorithms ("alg") and encryption methods ("enc") this module accepts.
SUPPORTED = {
    "alg": ["RSA1_5", "RSA-OAEP", "A128KW", "A192KW", "A256KW",
            "ECDH-ES", "ECDH-ES+A128KW", "ECDH-ES+A192KW", "ECDH-ES+A256KW"],
    "enc": ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512",
            # "A128GCM", "A192GCM",
            "A256GCM"],
}
def alg2keytype(alg):
    """Map a JWE "alg" identifier to the JWK key type it requires.

    Returns "RSA", "oct" (symmetric), "EC", or None for unknown
    algorithms.  Order matters: "A..." must be tested before "ECDH"
    would never match it, and "RSA" before "A".
    """
    if alg.startswith("RSA"):
        return "RSA"
    if alg.startswith("A"):
        return "oct"
    if alg.startswith("ECDH"):
        return "EC"
    return None
# =============================================================================

# CEK lengths in bytes for GCM-family "enc" algorithms.
ENCALGLEN1 = {
    "A128GCM": 16,
    "A192GCM": 24,
    "A256GCM": 32
}

# CEK lengths in bytes for CBC+HMAC-family "enc" algorithms
# (half encryption key, half MAC key).
ENCALGLEN2 = {
    "A128CBC-HS256": 32,
    "A192CBC-HS384": 48,
    "A256CBC-HS512": 64,
}
class JWEnc(JWT):
    """A JWE token in compact serialization: five base64url parts
    (protected header, encrypted key, IV, ciphertext, tag)."""

    def b64_protected_header(self):
        return self.b64part[0]

    def b64_encrypted_key(self):
        return self.b64part[1]

    def b64_initialization_vector(self):
        return self.b64part[2]

    def b64_ciphertext(self):
        return self.b64part[3]

    def b64_authentication_tag(self):
        return self.b64part[4]

    def protected_header(self):
        return self.part[0]

    def encrypted_key(self):
        return self.part[1]

    def initialization_vector(self):
        return self.part[2]

    def ciphertext(self):
        return self.part[3]

    def authentication_tag(self):
        return self.part[4]

    def b64_encode_header(self):
        return b64encode_item(self.headers)

    def is_jwe(self):
        """Return True when the headers identify a (supported) JWE.

        Fix: the original used ``assert`` for these checks, relying on
        catching AssertionError.  Under ``python -O`` asserts are
        stripped, so missing or unsupported "alg"/"enc" values would
        validate as supported.  Plain conditionals keep the validation
        active in optimized mode; behavior is otherwise identical.
        """
        if "typ" in self.headers and self.headers["typ"].lower() == "jwe":
            return True
        if "alg" not in self.headers or "enc" not in self.headers:
            return False
        for typ in ["alg", "enc"]:
            if self.headers[typ] not in SUPPORTED[typ]:
                logger.debug("Not supported %s algorithm: %s" % (
                    typ, self.headers[typ]))
                return False
        return True
class JWe(JWx):
    """Shared content-encryption machinery for the concrete JWE_* classes."""

    @staticmethod
    def _generate_key_and_iv(encalg, cek="", iv=""):
        # Reuse caller-supplied CEK/IV when both are given; otherwise draw
        # random material sized for the requested "enc" algorithm.
        if cek and iv:
            return cek, iv
        try:
            # GCM family: key length per ENCALGLEN1, 96-bit IV.
            _key = Random.get_random_bytes(ENCALGLEN1[encalg])
            _iv = Random.get_random_bytes(12)
        except KeyError:
            try:
                # CBC+HMAC family: key length per ENCALGLEN2, 128-bit IV.
                _key = Random.get_random_bytes(ENCALGLEN2[encalg])
                _iv = Random.get_random_bytes(16)
            except KeyError:
                raise Exception("Unsupported encryption algorithm %s" % encalg)
        # A partially supplied pair keeps the supplied half.
        if cek:
            _key = cek
        if iv:
            _iv = iv
        return _key, _iv

    def alg2keytype(self, alg):
        # Instance-level wrapper around the module function.
        return alg2keytype(alg)

    def enc_setup(self, enc_alg, msg, auth_data, key=None, iv=""):
        """ Encrypt JWE content.

        :param enc_alg: The JWE "enc" value specifying the encryption algorithm
        :param msg: The plain text message
        :param auth_data: Additional authenticated data
        :param key: Key (CEK)
        :param iv: Initialization vector (generated when empty)
        :return: Tuple (ciphertext, tag, key) — note the CEK actually used
            is returned so callers can wrap it.
        """
        key, iv = self._generate_key_and_iv(enc_alg, key, iv)
        if enc_alg == "A256GCM":
            gcm = AES_GCM(bytes_to_long(key))
            ctxt, tag = gcm.encrypt(bytes_to_long(iv), msg, auth_data)
            tag = long_to_bytes(tag)
        elif enc_alg in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]:
            assert enc_alg in SUPPORTED["enc"]
            ctxt, tag = aes_cbc_hmac_encrypt(key, iv, auth_data, msg)
        else:
            raise NotSupportedAlgorithm(enc_alg)
        return ctxt, tag, key

    @staticmethod
    def _decrypt(enc, key, ctxt, auth_data, iv, tag):
        """ Decrypt JWE content.

        :param enc: The JWE "enc" value specifying the encryption algorithm
        :param key: Key (CEK)
        :param iv : Initialization vector
        :param auth_data: Additional authenticated data (AAD)
        :param ctxt : Ciphertext
        :param tag: Authentication tag
        :return: (plaintext, True) on success; GCM failure yields
            (None, False) rather than raising.
        """
        if enc in ["A128GCM", "A192GCM", "A256GCM"]:
            gcm = AES_GCM(bytes_to_long(key))
            try:
                text = gcm.decrypt(bytes_to_long(iv), ctxt, bytes_to_long(tag),
                                   auth_data)
                return text, True
            except DecryptionFailed:
                return None, False
        elif enc in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]:
            return aes_cbc_hmac_decrypt(key, iv, auth_data, ctxt, tag)
        else:
            raise Exception("Unsupported encryption algorithm %s" % enc)
class JWE_SYM(JWe):
    """JWE using symmetric key management (AES key wrap of the CEK)."""

    args = JWe.args[:]
    args.append("enc")

    def encrypt(self, key, iv="", cek="", **kwargs):
        """
        :param key: Shared symmetric key (bytes or int array)
        :param iv: initialization vector
        :param cek: Content encryption key (generated when empty)
        :param kwargs: Extra keyword arguments; only "kid" is used.
        :return: Packed JWE token
        """
        _msg = self.msg
        _args = self._dict
        try:
            _args["kid"] = kwargs["kid"]
        except KeyError:
            pass
        jwe = JWEnc(**_args)
        # If no iv and cek are given generate them
        cek, iv = self._generate_key_and_iv(self["enc"], cek, iv)
        if isinstance(key, six.binary_type):
            kek = key
        else:
            kek = intarr2str(key)
        # The iv for this function must be 64 bit
        # Which is certainly different from the one above
        jek = aes_wrap_key(kek, cek)
        _enc = self["enc"]
        ctxt, tag, cek = self.enc_setup(_enc, _msg.encode(),
                                        jwe.b64_encode_header(),
                                        cek, iv=iv)
        return jwe.pack(parts=[jek, iv, ctxt, tag])

    def decrypt(self, token, key=None, cek=None):
        """Decrypt *token* with the shared *key* or a direct *cek*.

        :raises MissingKey: when neither *key* nor *cek* is given.
        """
        if not key and not cek:
            # Fix: error message previously read "On of key or cek ...".
            raise MissingKey("One of key or cek must be specified")
        jwe = JWEnc().unpack(token)
        if not cek:
            jek = jwe.encrypted_key()
            # The iv for this function must be 64 bit
            cek = aes_unwrap_key(key, jek)
        msg = self._decrypt(
            jwe.headers["enc"], cek, jwe.ciphertext(),
            jwe.b64_protected_header(),
            jwe.initialization_vector(), jwe.authentication_tag())
        if "zip" in self and self["zip"] == "DEF":
            msg = zlib.decompress(msg)
        return msg
class JWE_RSA(JWe):
    """JWE using RSA key management (RSA1_5 or RSA-OAEP wrapped CEK)."""

    args = ["msg", "alg", "enc", "epk", "zip", "jku", "jwk", "x5u", "x5t",
            "x5c", "kid", "typ", "cty", "apu", "crit"]

    def encrypt(self, key, iv="", cek="", **kwargs):
        """
        Produces a JWE using RSA algorithms

        :param key: RSA key
        :param context:
        :param iv: Initialization vector (generated when empty)
        :param cek: Content encryption key (generated when empty)
        :return: A jwe
        """
        _msg = as_bytes(self.msg)
        # Optional DEFLATE compression of the payload before encryption.
        if "zip" in self:
            if self["zip"] == "DEF":
                _msg = zlib.compress(_msg)
            else:
                raise ParameterError("Zip has unknown value: %s" % self["zip"])
        _enc = self["enc"]
        cek, iv = self._generate_key_and_iv(_enc, cek, iv)
        logger.debug("cek: %s, iv: %s" % ([c for c in cek], [c for c in iv]))
        _encrypt = RSAEncrypter(self.with_digest).encrypt
        _alg = self["alg"]
        # Wrap the CEK with the recipient's RSA public key.
        if _alg == "RSA-OAEP":
            jwe_enc_key = _encrypt(cek, key, 'pkcs1_oaep_padding')
        elif _alg == "RSA1_5":
            jwe_enc_key = _encrypt(cek, key)
        else:
            raise NotSupportedAlgorithm(_alg)
        jwe = JWEnc(**self.headers())
        # The protected header doubles as AAD for content encryption.
        enc_header = jwe.b64_encode_header()
        ctxt, tag, key = self.enc_setup(_enc, _msg, enc_header, cek, iv)
        return jwe.pack(parts=[jwe_enc_key, iv, ctxt, tag])

    def decrypt(self, token, key):
        """ Decrypts a JWT

        :param token: The JWT
        :param key: A key to use for decrypting
        :return: The decrypted message
        """
        jwe = JWEnc().unpack(token)
        self.jwt = jwe.encrypted_key()
        jek = jwe.encrypted_key()
        _decrypt = RSAEncrypter(self.with_digest).decrypt
        _alg = jwe.headers["alg"]
        # Unwrap the CEK with the recipient's RSA private key.
        if _alg == "RSA-OAEP":
            cek = _decrypt(jek, key, 'pkcs1_oaep_padding')
        elif _alg == "RSA1_5":
            cek = _decrypt(jek, key)
        else:
            raise NotSupportedAlgorithm(_alg)
        enc = jwe.headers["enc"]
        try:
            assert enc in SUPPORTED["enc"]
        except AssertionError:
            raise NotSupportedAlgorithm(enc)
        msg, flag = self._decrypt(enc, cek, jwe.ciphertext(),
                                  jwe.b64_protected_header(),
                                  jwe.initialization_vector(),
                                  jwe.authentication_tag())
        # GCM decryption signals tag failure via the flag, not an exception.
        if flag is False:
            raise DecryptionFailed()
        if "zip" in jwe.headers and jwe.headers["zip"] == "DEF":
            msg = zlib.decompress(msg)
        return msg
class JWE_EC(JWe):
    """JWE key management via ECDH-ES (direct or with AES key wrap)."""

    def enc_setup(self, msg, auth_data, key=None, **kwargs):
        """Derive the CEK (and wrapped key, if any) for an ECDH-ES JWE.

        :param msg: plaintext (unused here; kept for interface parity)
        :param auth_data: additional authenticated data (unused here)
        :param key: recipient's EC public key (must expose ``.crv``)
        :return: tuple (cek, encrypted_key, iv, params)
        """
        encrypted_key = ""
        # Agreement PartyU/V info: base64url-decode caller-supplied values,
        # otherwise generate fresh random raw bytes.
        # Fix: the fallback was b64d(Random.get_random_bytes(16)), i.e.
        # base64-decoding raw random bytes, which fails or corrupts the
        # value — the random bytes themselves are the party info, matching
        # the decoded form of the kwargs path.
        try:
            apu = b64d(kwargs["apu"])
        except KeyError:
            apu = Random.get_random_bytes(16)
        try:
            apv = b64d(kwargs["apv"])
        except KeyError:
            apv = Random.get_random_bytes(16)
        # Generate an ephemeral key pair on the recipient key's curve.
        curve = NISTEllipticCurve.by_name(key.crv)
        if "epk" in kwargs:
            eprivk = ECKey(kwargs["epk"])
        else:
            (eprivk, epk) = curve.key_pair()
        params = {
            "apu": b64e(apu),
            "apv": b64e(apv),
        }
        cek, iv = self._generate_key_and_iv(self.enc)
        if self.alg == "ECDH-ES":
            # Direct key agreement: the derived key IS the CEK.
            try:
                dk_len = KEYLEN[self.enc]
            except KeyError:
                raise Exception(
                    "Unknown key length for algorithm %s" % self.enc)
            cek = ecdh_derive_key(curve, eprivk, key, apu, apv, self.enc,
                                  dk_len)
        elif self.alg in ["ECDH-ES+A128KW", "ECDH-ES+A192KW", "ECDH-ES+A256KW"]:
            # Agreement + key wrapping: derive a KEK, then wrap the CEK.
            _pre, _post = self.alg.split("+")
            klen = int(_post[1:4])
            kek = ecdh_derive_key(curve, eprivk, key, apu, apv, _post, klen)
            encrypted_key = aes_wrap_key(kek, cek)
        else:
            raise Exception("Unsupported algorithm %s" % self.alg)
        return cek, encrypted_key, iv, params
class JWE(JWx):
    """Facade that dispatches to JWE_RSA or JWE_SYM based on "alg"."""

    args = ["alg", "enc", "epk", "zip", "jku", "jwk", "x5u", "x5t",
            "x5c", "kid", "typ", "cty", "apu", "crit"]

    """
    :param msg: The message
    :param alg: Algorithm
    :param enc: Encryption Method
    :param epk: Ephemeral Public Key
    :param zip: Compression Algorithm
    :param jku: a URI that refers to a resource for a set of JSON-encoded
        public keys, one of which corresponds to the key used to digitally
        sign the JWS
    :param jwk: A JSON Web Key that corresponds to the key used to
        digitally sign the JWS
    :param x5u: a URI that refers to a resource for the X.509 public key
        certificate or certificate chain [RFC5280] corresponding to the key
        used to digitally sign the JWS.
    :param x5t: a base64url encoded SHA-1 thumbprint (a.k.a. digest) of the
        DER encoding of the X.509 certificate [RFC5280] corresponding to
        the key used to digitally sign the JWS.
    :param x5c: the X.509 public key certificate or certificate chain
        corresponding to the key used to digitally sign the JWS.
    :param kid: Key ID a hint indicating which key was used to secure the
        JWS.
    :param typ: the type of this object. 'JWS' == JWS Compact Serialization
        'JWS+JSON' == JWS JSON Serialization
    :param cty: Content Type
    :param apu: Agreement PartyUInfo
    :param crit: indicates which extensions that are being used and MUST
        be understood and processed.
    :return: A class instance
    """

    def encrypt(self, keys=None, cek="", iv="", **kwargs):
        """
        :param keys: A set of possibly usable keys
        :param context: If the other party's public or my private key should be
            used for encryption
        :param cek: Content master key
        :param iv: Initialization vector
        :param kwargs: Extra key word arguments
        :return: Encrypted message
        """
        _alg = self["alg"]
        # Pick the concrete encrypter implementation from "alg".
        if _alg.startswith("RSA") and _alg in ["RSA-OAEP", "RSA1_5"]:
            encrypter = JWE_RSA(self.msg, **self._dict)
        elif _alg.startswith("A") and _alg.endswith("KW"):
            encrypter = JWE_SYM(self.msg, **self._dict)
        else:
            logger.error("'{}' is not a supported algorithm".format(_alg))
            raise NotSupportedAlgorithm
        # Filter candidate keys down to encryption-capable ones.
        if keys:
            keys = self._pick_keys(keys, use="enc")
        else:
            keys = self._pick_keys(self._get_keys(), use="enc")
        if not keys:
            logger.error(
                "Could not find any suitable encryption key for alg='{"
                "}'".format(_alg))
            raise NoSuitableEncryptionKey(_alg)
        if cek:
            kwargs["cek"] = cek
        if iv:
            kwargs["iv"] = iv
        # Try each candidate key; return on the first success.
        for key in keys:
            _key = key.encryption_key(alg=_alg, private=True)
            if key.kid:
                encrypter["kid"] = key.kid
            try:
                token = encrypter.encrypt(_key, **kwargs)
            except TypeError as err:
                raise err
            else:
                logger.debug(
                    "Encrypted message using key with kid={}".format(key.kid))
                return token
        logger.error("Could not find any suitable encryption key")
        raise NoSuitableEncryptionKey()

    def decrypt(self, token, keys=None, alg=None):
        """Decrypt *token*, optionally enforcing an expected *alg*."""
        jwe = JWEnc().unpack(token)
        # header, ek, eiv, ctxt, tag = token.split(b".")
        # self.parse_header(header)
        _alg = jwe.headers["alg"]
        if alg and alg != _alg:
            raise WrongEncryptionAlgorithm()
        # Pick the concrete decrypter implementation from "alg".
        if _alg in ["RSA-OAEP", "RSA1_5"]:
            decrypter = JWE_RSA(**self._dict)
        elif _alg.startswith("A") and _alg.endswith("KW"):
            decrypter = JWE_SYM(self.msg, **self._dict)
        else:
            raise NotSupportedAlgorithm
        if keys:
            keys = self._pick_keys(keys, use="enc", alg=_alg)
        else:
            keys = self._pick_keys(self._get_keys(), use="enc", alg=_alg)
        if not keys:
            raise NoSuitableDecryptionKey(_alg)
        # Try each candidate key; return the first successful decryption.
        for key in keys:
            _key = key.encryption_key(alg=_alg, private=False)
            try:
                msg = decrypter.decrypt(as_bytes(token), _key)
            except (KeyError, DecryptionFailed):
                pass
            else:
                logger.debug(
                    "Decrypted message using key with kid=%s" % key.kid)
                return msg
        raise DecryptionFailed(
            "No available key that could decrypt the message")
def factory(token):
    """Return a JWE instance wrapping *token* if it parses as a JWE,
    otherwise None."""
    parsed = JWEnc().unpack(token)
    if not parsed.is_jwe():
        return None
    jwe_obj = JWE()
    jwe_obj.jwt = parsed
    return jwe_obj
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
from http import client as http_client
import os
import shutil
from urllib import parse as urlparse
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import uuidutils
import requests
from ironic.common import exception
from ironic.common.glance_service.image_service import GlanceImageService
from ironic.common.i18n import _
from ironic.common import utils
from ironic.conf import CONF
# Chunk size for streaming HTTP downloads.
IMAGE_CHUNK_SIZE = 1024 * 1024  # 1mb

# NOTE(kaifeng) Image will be truncated to 2GiB by sendfile,
# we use a large chunk size here for a better performance
# while keep the chunk size less than the size limit.
SENDFILE_CHUNK_SIZE = 1024 * 1024 * 1024  # 1Gb

LOG = log.getLogger(__name__)
class BaseImageService(object, metaclass=abc.ABCMeta):
    """Abstract interface for disk-image retrieval backends.

    Concrete services (HTTP, local file, Glance, ...) implement the
    three abstract methods below.
    """

    @abc.abstractmethod
    def validate_href(self, image_href):
        """Validate image reference.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed.
        :returns: Information needed to further operate with an image.
        """

    @abc.abstractmethod
    def download(self, image_href, image_file):
        """Downloads image to specified location.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed.
        :raises: exception.ImageDownloadFailed.
        """

    @abc.abstractmethod
    def show(self, image_href):
        """Get dictionary of image properties.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed.
        :returns: dictionary of image properties. It has three of them:
            'size', 'updated_at' and 'properties'. 'updated_at' attribute
            is a naive UTC datetime object.
        """
class HttpImageService(BaseImageService):
    """Provides retrieval of disk images using HTTP."""

    def validate_href(self, image_href, secret=False):
        """Validate HTTP image reference.

        :param image_href: Image reference.
        :param secret: Specify if image_href being validated should not be
            shown in exception message.
        :raises: exception.ImageRefValidationFailed if HEAD request failed or
            returned response code not equal to 200.
        :returns: Response to HEAD request.
        """
        output_url = 'secreturl' if secret else image_href
        # webserver_verify_ca may be a boolean string ("true"/"false") or
        # a path to a CA bundle; requests accepts either form for verify=.
        try:
            verify = strutils.bool_from_string(CONF.webserver_verify_ca,
                                               strict=True)
        except ValueError:
            verify = CONF.webserver_verify_ca
        try:
            response = requests.head(image_href, verify=verify,
                                     timeout=CONF.webserver_connection_timeout)
            if response.status_code != http_client.OK:
                raise exception.ImageRefValidationFailed(
                    image_href=output_url,
                    reason=_("Got HTTP code %s instead of 200 in response "
                             "to HEAD request.") % response.status_code)
        except (OSError, requests.ConnectionError,
                requests.RequestException) as e:
            raise exception.ImageRefValidationFailed(image_href=output_url,
                                                     reason=str(e))
        return response

    def download(self, image_href, image_file):
        """Downloads image to specified location.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed if GET request returned
            response code not equal to 200.
        :raises: exception.ImageDownloadFailed if:
            * IOError happened during file write;
            * GET request failed.
        """
        # Same bool-or-CA-path handling as in validate_href.
        try:
            verify = strutils.bool_from_string(CONF.webserver_verify_ca,
                                               strict=True)
        except ValueError:
            verify = CONF.webserver_verify_ca
        try:
            response = requests.get(image_href, stream=True, verify=verify,
                                    timeout=CONF.webserver_connection_timeout)
            if response.status_code != http_client.OK:
                raise exception.ImageRefValidationFailed(
                    image_href=image_href,
                    reason=_("Got HTTP code %s instead of 200 in response "
                             "to GET request.") % response.status_code)
            # Stream the raw response body to disk in fixed-size chunks.
            with response.raw as input_img:
                shutil.copyfileobj(input_img, image_file, IMAGE_CHUNK_SIZE)
        except (OSError, requests.ConnectionError, requests.RequestException,
                IOError) as e:
            raise exception.ImageDownloadFailed(image_href=image_href,
                                                reason=str(e))

    def show(self, image_href):
        """Get dictionary of image properties.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if:
            * HEAD request failed;
            * HEAD request returned response code not equal to 200;
            * Content-Length header not found in response to HEAD request.
        :returns: dictionary of image properties. It has three of them:
            'size', 'updated_at' and 'properties'. 'updated_at' attribute
            is a naive UTC datetime object.
        """
        response = self.validate_href(image_href)
        image_size = response.headers.get('Content-Length')
        if image_size is None:
            raise exception.ImageRefValidationFailed(
                image_href=image_href,
                reason=_("Cannot determine image size as there is no "
                         "Content-Length header specified in response "
                         "to HEAD request."))
        # Parse last-modified header to return naive datetime object
        str_date = response.headers.get('Last-Modified')
        date = None
        if str_date:
            # Try the three date formats HTTP servers commonly emit.
            http_date_format_strings = [
                '%a, %d %b %Y %H:%M:%S GMT',  # RFC 822
                '%A, %d-%b-%y %H:%M:%S GMT',  # RFC 850
                '%a %b %d %H:%M:%S %Y'        # ANSI C
            ]
            for fmt in http_date_format_strings:
                try:
                    date = datetime.datetime.strptime(str_date, fmt)
                    break
                except ValueError:
                    continue
        # Honor server no-store directives so callers skip local caching.
        no_cache = 'no-store' in response.headers.get('Cache-Control', '')
        return {
            'size': int(image_size),
            'updated_at': date,
            'properties': {},
            'no_cache': no_cache,
        }
class FileImageService(BaseImageService):
    """Provides retrieval of disk images available locally on the conductor."""

    def validate_href(self, image_href):
        """Validate local image reference.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if source image file
            doesn't exist.
        :returns: Path to image file if it exists.
        """
        image_path = urlparse.urlparse(image_href).path
        if not os.path.isfile(image_path):
            raise exception.ImageRefValidationFailed(
                image_href=image_href,
                reason=_("Specified image file not found."))
        return image_path

    def download(self, image_href, image_file):
        """Downloads image to specified location.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed if source image file
            doesn't exist.
        :raises: exception.ImageDownloadFailed if exceptions were raised while
            writing to file or creating hard link.
        """
        source_image_path = self.validate_href(image_href)
        dest_image_path = image_file.name
        local_device = os.stat(dest_image_path).st_dev
        try:
            # We should have read and write access to source file to create
            # hard link to it.
            # Fast path: same filesystem — replace the destination with a
            # hard link instead of copying any data.
            if (local_device == os.stat(source_image_path).st_dev
                    and os.access(source_image_path, os.R_OK | os.W_OK)):
                image_file.close()
                os.remove(dest_image_path)
                os.link(source_image_path, dest_image_path)
            else:
                # Slow path: kernel-space copy via sendfile, chunked to
                # stay under sendfile's per-call size limitation.
                filesize = os.path.getsize(source_image_path)
                offset = 0
                with open(source_image_path, 'rb') as input_img:
                    while offset < filesize:
                        count = min(SENDFILE_CHUNK_SIZE, filesize - offset)
                        nbytes_out = os.sendfile(image_file.fileno(),
                                                 input_img.fileno(),
                                                 offset,
                                                 count)
                        offset += nbytes_out
        except Exception as e:
            raise exception.ImageDownloadFailed(image_href=image_href,
                                                reason=str(e))

    def show(self, image_href):
        """Get dictionary of image properties.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if image file specified
            doesn't exist.
        :returns: dictionary of image properties. It has three of them:
            'size', 'updated_at' and 'properties'. 'updated_at' attribute
            is a naive UTC datetime object.
        """
        source_image_path = self.validate_href(image_href)
        return {
            'size': os.path.getsize(source_image_path),
            'updated_at': utils.unix_file_modification_datetime(
                source_image_path),
            'properties': {},
            # No point in caching local file images
            'no_cache': True,
        }
# Maps an image href URL scheme to the image service class able to handle
# it; scheme-less UUID hrefs are handled in get_image_service() below.
protocol_mapping = {
    'http': HttpImageService,
    'https': HttpImageService,
    'file': FileImageService,
    'glance': GlanceImageService,
}
def get_image_service(image_href, client=None, context=None):
    """Get image service instance to download the image.

    :param image_href: String containing href to get image service for.
    :param client: Glance client to be used for download, used only if
        image_href is Glance href.
    :param context: request context, used only if image_href is Glance href.
    :raises: exception.ImageRefValidationFailed if no image service can
        handle specified href.
    :returns: Instance of an image service class that is able to download
        specified image.
    """
    scheme = urlparse.urlparse(image_href).scheme.lower()
    if scheme:
        cls = protocol_mapping.get(scheme)
        if not cls:
            raise exception.ImageRefValidationFailed(
                image_href=image_href,
                reason=_('Image download protocol %s is not supported.'
                         ) % scheme)
    elif uuidutils.is_uuid_like(str(image_href)):
        # A bare UUID with no scheme is treated as a Glance image ID.
        cls = GlanceImageService
    else:
        raise exception.ImageRefValidationFailed(
            image_href=image_href,
            reason=_('Scheme-less image href is not a UUID.'))
    # Only the Glance service needs the client/context pair.
    if cls == GlanceImageService:
        return cls(client, context)
    return cls()
|
|
"""
Generate Carbon Diamond Lattice
@author: Kenny Jolley
Derived from similar form by Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import numpy as np
from ..system.lattice import Lattice
from . import lattice_gen_utils
from six.moves import range
################################################################################
class Args(object):
    """Option container for the diamond lattice generator.

    NCells: 3-sequence containing number of unit cells in each direction
        (default=[6, 6, 6])
    charge1: per-atom charge; defaulted to zero, and should not be changed
        for pure carbon systems
    a0: lattice 'a' constant (default=3.556717) (AIREBO)
    pbcx, pbcy, pbcz: PBCs in each direction (default=True)
    quiet: accepted for interface compatibility; currently unused.
    """

    def __init__(self, sym1="C_", charge1=0.0, NCells=None, a0=3.556717,
                 pbcx=True, pbcy=True, pbcz=True, quiet=False):
        self.sym1 = sym1
        self.charge1 = charge1
        # BUG FIX: the default used to be the mutable literal [6, 6, 6],
        # which Python shares across *every* Args() call — mutating one
        # instance's NCells would silently change all others. Build a
        # fresh list per instance instead (None sentinel pattern).
        self.NCells = [6, 6, 6] if NCells is None else NCells
        self.a0 = a0
        self.pbcx = pbcx
        self.pbcy = pbcy
        self.pbcz = pbcz
################################################################################
class DiamondLatticeGenerator(object):
    """
    Carbon Diamond lattice generator.

    Builds a diamond-cubic lattice of ``args.sym1`` atoms (8 atoms per
    conventional cell) and returns it wrapped in a ``Lattice`` object.
    """

    def __init__(self, log=None):
        # Optional logging callback with signature
        # callback(message, level=0, indent=0); None disables logging.
        self.logger = log

    def log(self, message, level=0, indent=0):
        """
        Write log message through the optional callback (no-op otherwise).
        """
        if self.logger is not None:
            self.logger(message, level=level, indent=indent)

    def generateLattice(self, args):
        """
        Generate the lattice.

        :param args: ``Args``-style options object providing NCells, a0,
            sym1, charge1 and the pbcx/pbcy/pbcz flags.
        :returns: tuple ``(status, lattice)``; status 0 indicates success.
        """
        logger = logging.getLogger(__name__)
        # BUG FIX: the message previously said "Fluorite" — a leftover
        # from the generator this module was derived from.
        logger.info("Generating Diamond lattice")

        # lattice constants: fractional offsets within the unit cell
        a0 = args.a0
        b0 = a0 / 4.0
        b1 = a0 / 2.0
        b2 = 3.0 * b0

        # atom symbol of the single-species cell (defaults to carbon)
        sym_uc = args.sym1

        # positions of the 8 atoms of the conventional diamond cell.
        # BUG FIX: was allocated as np.empty(3 * 12) although only
        # 3 * 8 = 24 coordinates are ever written or read.
        pos_uc = np.empty(3 * 8, np.float64)
        pos_uc[0] = 0.0;  pos_uc[1] = 0.0;  pos_uc[2] = 0.0
        pos_uc[3] = b0;   pos_uc[4] = b0;   pos_uc[5] = b0
        pos_uc[6] = b1;   pos_uc[7] = b1;   pos_uc[8] = 0.0
        pos_uc[9] = b2;   pos_uc[10] = b2;  pos_uc[11] = b0
        pos_uc[12] = b1;  pos_uc[13] = 0.0; pos_uc[14] = b1
        pos_uc[15] = 0.0; pos_uc[16] = b1;  pos_uc[17] = b1
        pos_uc[18] = b2;  pos_uc[19] = b0;  pos_uc[20] = b2
        pos_uc[21] = b0;  pos_uc[22] = b2;  pos_uc[23] = b2

        # atom charge (this should be zero for pure carbon)
        q_uc = args.charge1

        # With a fixed (non-periodic) boundary we generate one extra plane
        # of cells so the free surface is complete; out-of-box atoms are
        # skipped in the loop below.
        iStop = args.NCells[0] if args.pbcx else args.NCells[0] + 1
        jStop = args.NCells[1] if args.pbcy else args.NCells[1] + 1
        kStop = args.NCells[2] if args.pbcz else args.NCells[2] + 1

        # lattice dimensions
        dims = [a0 * args.NCells[0], a0 * args.NCells[1], a0 * args.NCells[2]]

        # lattice structure
        lattice = Lattice()
        lattice.setDims(dims)
        lattice.addSpecie(args.sym1)

        # pre-allocate arrays for the maximum possible number of atoms
        maxAtoms = iStop * jStop * kStop * 8
        lattice.specie = np.zeros(maxAtoms, dtype=np.int32)
        lattice.charge = np.zeros(maxAtoms, dtype=np.float64)
        lattice.pos = np.zeros(maxAtoms * 3, dtype=np.float64)

        # generate lattice
        count = 0
        totalQ = 0.0
        # single species: resolve the index once, outside the loops
        specInd = lattice.getSpecieIndex(sym_uc)
        for i in range(iStop):
            ifac = i * a0
            for j in range(jStop):
                jfac = j * a0
                for k in range(kStop):
                    kfac = k * a0
                    for l in range(8):
                        # position of new atom
                        l3 = 3 * l
                        rx_tmp = pos_uc[l3] + ifac
                        ry_tmp = pos_uc[l3 + 1] + jfac
                        rz_tmp = pos_uc[l3 + 2] + kfac

                        # skip if outside lattice (ie when making extra cell
                        # to get surface for non-periodic boundaries)
                        if (rx_tmp > dims[0] + 0.0001 or
                                ry_tmp > dims[1] + 0.0001 or
                                rz_tmp > dims[2] + 0.0001):
                            continue

                        # add to lattice structure
                        lattice.specieCount[specInd] += 1
                        lattice.specie[count] = np.int32(specInd)
                        lattice.pos[count * 3] = np.float64(rx_tmp)
                        lattice.pos[count * 3 + 1] = np.float64(ry_tmp)
                        lattice.pos[count * 3 + 2] = np.float64(rz_tmp)
                        lattice.charge[count] = np.float64(q_uc)
                        totalQ += q_uc
                        count += 1

        lattice.NAtoms = count

        # cut trailing zero's if required (surface atoms were skipped)
        if count != len(lattice.specie):
            lattice.specie = lattice.specie[0:count]
            lattice.charge = lattice.charge[0:count]
            lattice.pos = lattice.pos[0:count * 3]

        # min/max pos in each dimension (pos is a flat x,y,z,x,y,z,... array)
        for i in range(3):
            lattice.minPos[i] = np.min(lattice.pos[i::3])
            lattice.maxPos[i] = np.max(lattice.pos[i::3])

        # sequential 1-based atom IDs
        lattice.atomID = np.arange(1, lattice.NAtoms + 1, dtype=np.int32)

        # periodic boundaries
        lattice.PBC[0] = int(args.pbcx)
        lattice.PBC[1] = int(args.pbcy)
        lattice.PBC[2] = int(args.pbcz)

        logger.info(" Number of atoms: %d", lattice.NAtoms)
        logger.info(" Dimensions: %s", str(dims))
        logger.info(" Total charge: %f", totalQ)

        # sort out charges with fixed boundaries
        if not args.pbcx and not args.pbcy and not args.pbcz:
            if args.charge1 != 0.0:
                logger.info("Fixing charges on fixed boundaries")
                totalQ = lattice_gen_utils.fixChargesOnFixedBoundaries(lattice)
                logger.info(" Total charge after modification: %f", totalQ)

        return 0, lattice
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
HTTP Proxy Server in Python.
:copyright: (c) 2013-2018 by Abhinav Singh.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import errno
import base64
import socket
import select
import logging
import argparse
import datetime
import threading
import json
from collections import namedtuple
# The "resource" module (RLIMIT handling) is POSIX-only.
if os.name != 'nt':
    import resource

VERSION = (0, 3)
__version__ = '.'.join(map(str, VERSION[0:2]))
__description__ = 'Lightweight HTTP, HTTPS, WebSockets Proxy Server in a single Python file'
__author__ = 'Abhinav Singh'
__author_email__ = 'mailsforabhinav@gmail.com'
__homepage__ = 'https://github.com/abhinavsingh/proxy.py'
__download_url__ = '%s/archive/master.zip' % __homepage__
__license__ = 'BSD'

logger = logging.getLogger(__name__)

# Python 2/3 compatibility shim: normalise the text/bytes type names
# and the location of the urlparse module.
PY3 = sys.version_info[0] == 3
if PY3:  # pragma: no cover
    text_type = str
    binary_type = bytes
    from urllib import parse as urlparse
else:  # pragma: no cover
    text_type = unicode
    binary_type = str
    import urlparse
def text_(s, encoding='utf-8', errors='strict'):  # pragma: no cover
    """Coerce *s* to text.

    If ``s`` is an instance of ``binary_type``, return
    ``s.decode(encoding, errors)``, otherwise return ``s`` unchanged.
    """
    return s.decode(encoding, errors) if isinstance(s, binary_type) else s
def bytes_(s, encoding='utf-8', errors='strict'):  # pragma: no cover
    """Coerce *s* to bytes.

    If ``s`` is an instance of ``text_type``, return
    ``s.encode(encoding, errors)``, otherwise return ``s`` unchanged.
    """
    return s.encode(encoding, errors) if isinstance(s, text_type) else s
version = bytes_(__version__)

# Byte constants used while parsing and serialising HTTP messages.
CRLF, COLON, SP = b'\r\n', b':', b' '

PROXY_AGENT_HEADER = b'Proxy-agent: proxy.py v' + version

# Canned response sent to the client once a CONNECT tunnel is established.
PROXY_TUNNEL_ESTABLISHED_RESPONSE_PKT = CRLF.join([
    b'HTTP/1.1 200 Connection established',
    PROXY_AGENT_HEADER,
    CRLF
])

# Canned response returned when the upstream connection cannot be made.
BAD_GATEWAY_RESPONSE_PKT = CRLF.join([
    b'HTTP/1.1 502 Bad Gateway',
    PROXY_AGENT_HEADER,
    b'Content-Length: 11',
    b'Connection: close',
    CRLF
]) + b'Bad Gateway'

# Canned response returned when basic proxy authentication fails.
PROXY_AUTHENTICATION_REQUIRED_RESPONSE_PKT = CRLF.join([
    b'HTTP/1.1 407 Proxy Authentication Required',
    PROXY_AGENT_HEADER,
    b'Content-Length: 29',
    b'Connection: close',
    CRLF
]) + b'Proxy Authentication Required'
class ChunkParser(object):
    """HTTP chunked encoding response parser.

    Incremental state machine: feed raw bytes to parse(); after the
    terminating zero-sized chunk is seen, ``self.body`` holds the
    de-chunked body and ``self.state`` is COMPLETE.
    """

    states = namedtuple('ChunkParserStates', (
        'WAITING_FOR_SIZE',
        'WAITING_FOR_DATA',
        'COMPLETE'
    ))(1, 2, 3)

    def __init__(self):
        self.state = ChunkParser.states.WAITING_FOR_SIZE
        self.body = b''  # Parsed chunks
        self.chunk = b''  # Partial chunk received
        self.size = None  # Expected size of next following chunk

    def parse(self, data):
        """Consume *data*, advancing the state machine step by step."""
        more = True if len(data) > 0 else False
        while more:
            more, data = self.process(data)

    def process(self, data):
        """Run one parsing step; returns (more_to_do, remaining_data)."""
        if self.state == ChunkParser.states.WAITING_FOR_SIZE:
            # Consume prior chunk in buffer
            # in case chunk size without CRLF was received
            data = self.chunk + data
            self.chunk = b''
            # Extract following chunk data size
            line, data = HttpParser.split(data)
            if not line:  # CRLF not received
                self.chunk = data
                data = b''
            else:
                # Chunk size line is hexadecimal.
                self.size = int(line, 16)
                self.state = ChunkParser.states.WAITING_FOR_DATA
        elif self.state == ChunkParser.states.WAITING_FOR_DATA:
            remaining = self.size - len(self.chunk)
            self.chunk += data[:remaining]
            data = data[remaining:]
            if len(self.chunk) == self.size:
                # Chunk complete: swallow its trailing CRLF if present.
                if data[:len(CRLF)] == CRLF:
                    data = data[len(CRLF):]
                self.body += self.chunk
                if self.size == 0:
                    # Zero-size chunk terminates the body; any remaining
                    # bytes are appended verbatim.
                    self.body += data
                    data = b''
                    self.state = ChunkParser.states.COMPLETE
                else:
                    self.state = ChunkParser.states.WAITING_FOR_SIZE
                self.chunk = b''
                self.size = None
        return len(data) > 0, data
class HttpParser(object):
    """Incremental HTTP request/response parser.

    Feed raw bytes via parse(); the parser advances through ``states``
    and exposes method/url/version (requests), code/reason (responses),
    headers and body once available.
    """

    # Parser life-cycle states.
    states = namedtuple('HttpParserStates', (
        'INITIALIZED',
        'LINE_RCVD',
        'RCVING_HEADERS',
        'HEADERS_COMPLETE',
        'RCVING_BODY',
        'COMPLETE'))(1, 2, 3, 4, 5, 6)

    # Whether this instance parses requests or responses.
    types = namedtuple('HttpParserTypes', (
        'REQUEST_PARSER',
        'RESPONSE_PARSER'
    ))(1, 2)

    def __init__(self, parser_type):
        assert parser_type in (HttpParser.types.REQUEST_PARSER, HttpParser.types.RESPONSE_PARSER)
        self.type = parser_type
        self.state = HttpParser.states.INITIALIZED

        self.raw = b''     # Everything received so far, verbatim
        self.buffer = b''  # Unconsumed bytes awaiting more input

        self.headers = dict()  # lowercased name -> (original name, value)
        self.body = None

        self.method = None
        self.url = None
        self.code = None
        self.reason = None
        self.version = None

        self.chunk_parser = None  # Lazily created for chunked responses

    def is_chunked_encoded_response(self):
        # Header entries are (name, value) tuples; compare the value.
        return self.type == HttpParser.types.RESPONSE_PARSER and \
            b'transfer-encoding' in self.headers and \
            self.headers[b'transfer-encoding'][1].lower() == b'chunked'

    def parse(self, data):
        """Consume *data*, advancing the parser as far as possible."""
        self.raw += data
        data = self.buffer + data
        self.buffer = b''

        more = True if len(data) > 0 else False
        while more:
            more, data = self.process(data)
        self.buffer = data

    def process(self, data):
        """Run one parsing step; returns (more_to_do, remaining_data)."""
        # Once headers are done, POST request bodies and all response
        # bodies are accumulated here instead of line-by-line parsing.
        if self.state in (HttpParser.states.HEADERS_COMPLETE,
                          HttpParser.states.RCVING_BODY,
                          HttpParser.states.COMPLETE) and \
                (self.method == b'POST' or self.type == HttpParser.types.RESPONSE_PARSER):
            if not self.body:
                self.body = b''

            if b'content-length' in self.headers:
                self.state = HttpParser.states.RCVING_BODY
                self.body += data
                if len(self.body) >= int(self.headers[b'content-length'][1]):
                    self.state = HttpParser.states.COMPLETE
            elif self.is_chunked_encoded_response():
                if not self.chunk_parser:
                    self.chunk_parser = ChunkParser()
                self.chunk_parser.parse(data)
                if self.chunk_parser.state == ChunkParser.states.COMPLETE:
                    self.body = self.chunk_parser.body
                    self.state = HttpParser.states.COMPLETE

            return False, b''

        line, data = HttpParser.split(data)
        if line is False:
            # No complete CRLF-terminated line buffered yet.
            return line, data

        if self.state == HttpParser.states.INITIALIZED:
            self.process_line(line)
        elif self.state in (HttpParser.states.LINE_RCVD, HttpParser.states.RCVING_HEADERS):
            self.process_header(line)

        # When connect request is received without a following host header
        # See `TestHttpParser.test_connect_request_without_host_header_request_parse` for details
        if self.state == HttpParser.states.LINE_RCVD and \
                self.type == HttpParser.types.REQUEST_PARSER and \
                self.method == b'CONNECT' and \
                data == CRLF:
            self.state = HttpParser.states.COMPLETE
        # When raw request has ended with \r\n\r\n and no more http headers are expected
        # See `TestHttpParser.test_request_parse_without_content_length` and
        # `TestHttpParser.test_response_parse_without_content_length` for details
        elif self.state == HttpParser.states.HEADERS_COMPLETE and \
                self.type == HttpParser.types.REQUEST_PARSER and \
                self.method != b'POST' and \
                self.raw.endswith(CRLF * 2):
            self.state = HttpParser.states.COMPLETE
        elif self.state == HttpParser.states.HEADERS_COMPLETE and \
                self.type == HttpParser.types.REQUEST_PARSER and \
                self.method == b'POST' and \
                (b'content-length' not in self.headers or
                 (b'content-length' in self.headers and
                  int(self.headers[b'content-length'][1]) == 0)) and \
                self.raw.endswith(CRLF * 2):
            self.state = HttpParser.states.COMPLETE

        return len(data) > 0, data

    def process_line(self, data):
        """Parse the request line (requests) or status line (responses)."""
        line = data.split(SP)
        if self.type == HttpParser.types.REQUEST_PARSER:
            self.method = line[0].upper()
            self.url = urlparse.urlsplit(line[1])
            self.version = line[2]
        else:
            self.version = line[0]
            self.code = line[1]
            # The reason phrase may itself contain spaces.
            self.reason = b' '.join(line[2:])
        self.state = HttpParser.states.LINE_RCVD

    def process_header(self, data):
        """Parse a single header line; an empty line terminates headers."""
        if len(data) == 0:
            if self.state == HttpParser.states.RCVING_HEADERS:
                self.state = HttpParser.states.HEADERS_COMPLETE
            elif self.state == HttpParser.states.LINE_RCVD:
                self.state = HttpParser.states.RCVING_HEADERS
        else:
            self.state = HttpParser.states.RCVING_HEADERS
            parts = data.split(COLON)
            key = parts[0].strip()
            # Values may themselves contain ':' (e.g. host:port) — re-join.
            value = COLON.join(parts[1:]).strip()
            self.headers[key.lower()] = (key, value)

    def build_url(self):
        """Rebuild path[?query][#fragment] from the parsed URL."""
        if not self.url:
            return b'/None'

        url = self.url.path
        if url == b'':
            url = b'/'

        if not self.url.query == b'':
            url += b'?' + self.url.query

        if not self.url.fragment == b'':
            url += b'#' + self.url.fragment

        return url

    def build(self, del_headers=None, add_headers=None):
        """Serialise the parsed request, dropping/adding given headers."""
        req = b' '.join([self.method, self.build_url(), self.version])
        req += CRLF

        if not del_headers:
            del_headers = []
        for k in self.headers:
            if k not in del_headers:
                req += self.build_header(self.headers[k][0], self.headers[k][1]) + CRLF

        if not add_headers:
            add_headers = []
        for k in add_headers:
            req += self.build_header(k[0], k[1]) + CRLF

        req += CRLF
        if self.body:
            req += self.body

        return req

    @staticmethod
    def build_header(k, v):
        # Serialise one "Name: value" header (no trailing CRLF).
        return k + b': ' + v

    @staticmethod
    def split(data):
        """Split off one CRLF-terminated line; (False, data) if incomplete."""
        pos = data.find(CRLF)
        if pos == -1:
            return False, data
        line = data[:pos]
        data = data[pos + len(CRLF):]
        return line, data
class Connection(object):
    """TCP server/client connection abstraction.

    Wraps a socket with an outgoing byte buffer (queue/flush) and a
    recv() helper that normalises EOF and errors to None.
    """

    def __init__(self, what):
        self.conn = None      # Underlying socket, set by subclasses
        self.buffer = b''     # Bytes queued for sending
        self.closed = False
        self.what = what      # b'server' or b'client' (used in log lines)

    def send(self, data):
        # TODO: Gracefully handle BrokenPipeError exceptions
        return self.conn.send(data)

    def recv(self, bufsiz=8192):
        """Receive up to *bufsiz* bytes; return None on EOF or error."""
        try:
            data = self.conn.recv(bufsiz)
            if len(data) == 0:
                logger.debug('rcvd 0 bytes from %s' % self.what)
                return None
            logger.debug('rcvd %d bytes from %s' % (len(data), self.what))
            return data
        except Exception as e:
            # BUG FIX: not every exception carries an ``errno`` attribute;
            # reading e.errno directly could raise AttributeError inside
            # this handler. getattr() keeps the handler itself safe.
            if getattr(e, 'errno', None) == errno.ECONNRESET:
                logger.debug('%r' % e)
            else:
                logger.exception(
                    'Exception while receiving from connection %s %r with reason %r' % (self.what, self.conn, e))
            return None

    def close(self):
        self.conn.close()
        self.closed = True

    def buffer_size(self):
        return len(self.buffer)

    def has_buffer(self):
        return self.buffer_size() > 0

    def queue(self, data):
        # Append to the outgoing buffer; actual send happens in flush().
        self.buffer += data

    def flush(self):
        # Send as much of the buffer as the socket accepts right now.
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]
        logger.debug('flushed %d bytes to %s' % (sent, self.what))
class Server(Connection):
    """Connection to the destination (upstream) server."""

    def __init__(self, host, port, host_resolver=None):
        super(Server, self).__init__(b'server')
        # An optional static hostname -> IP mapping takes precedence
        # over normal DNS resolution.
        if host_resolver and host in host_resolver:
            host = host_resolver[host]
        self.addr = (host, int(port))

    def __del__(self):
        # Best-effort cleanup in case the owner never closed us.
        if self.conn:
            self.close()

    def connect(self):
        """Open the TCP connection to ``self.addr``."""
        host, port = self.addr
        self.conn = socket.create_connection((host, port))
class Client(Connection):
    """Accepted client connection."""

    def __init__(self, conn, addr):
        super(Client, self).__init__(b'client')
        self.conn = conn  # Already-connected socket from accept()
        self.addr = addr  # (ip, port) of the remote client
class ProxyError(Exception):
    """Base class for all proxy.py errors."""
    pass
class ProxyConnectionFailed(ProxyError):
    """Raised when the upstream server connection cannot be established."""

    def __init__(self, host, port, reason):
        self.host = host
        self.port = port
        self.reason = reason

    def __str__(self):
        return '<ProxyConnectionFailed - %s:%s - %s>' % (self.host, self.port, self.reason)
class ProxyAuthenticationFailed(ProxyError):
    """Raised when basic proxy authentication is missing or incorrect."""
    pass
class Proxy(threading.Thread):
    """HTTP proxy implementation.

    Accepts `Client` connection object and act as a proxy between client and server.
    Runs as one thread per client: a select() loop shuttles bytes between
    the two sockets until either side closes or the link goes inactive.
    """

    def __init__(self, client, auth_code=None, server_recvbuf_size=8192, client_recvbuf_size=8192, host_resolver=None):
        super(Proxy, self).__init__()

        self.start_time = self._now()
        self.last_activity = self.start_time

        self.auth_code = auth_code
        self.client = client
        self.client_recvbuf_size = client_recvbuf_size
        self.server = None
        self.server_recvbuf_size = server_recvbuf_size
        self.host_resolver = host_resolver

        self.request = HttpParser(HttpParser.types.REQUEST_PARSER)
        self.response = HttpParser(HttpParser.types.RESPONSE_PARSER)

    @staticmethod
    def _now():
        # Naive UTC timestamp used for inactivity accounting.
        return datetime.datetime.utcnow()

    def _inactive_for(self):
        # Seconds elapsed since the last read on either side.
        return (self._now() - self.last_activity).seconds

    def _is_inactive(self):
        # 30 seconds of silence is treated as a dead connection.
        return self._inactive_for() > 30

    def _process_request(self, data):
        """Handle bytes received from the client side."""
        # once we have connection to the server
        # we don't parse the http request packets
        # any further, instead just pipe incoming
        # data from client to server
        if self.server and not self.server.closed:
            self.server.queue(data)
            return

        # parse http request
        self.request.parse(data)

        # once http request parser has reached the state complete
        # we attempt to establish connection to destination server
        if self.request.state == HttpParser.states.COMPLETE:
            logger.debug('request parser is in state complete')

            if self.auth_code:
                if b'proxy-authorization' not in self.request.headers or \
                        self.request.headers[b'proxy-authorization'][1] != self.auth_code:
                    raise ProxyAuthenticationFailed()

            if self.request.method == b'CONNECT':
                # CONNECT carries "host:port" as the request path.
                host, port = self.request.url.path.split(COLON)
            elif self.request.url:
                host, port = self.request.url.hostname, self.request.url.port if self.request.url.port else 80
            else:
                raise Exception('Invalid request\n%s' % self.request.raw)

            self.server = Server(host, port, self.host_resolver)
            try:
                logger.debug('connecting to server %s:%s' % (host, port))
                self.server.connect()
                logger.debug('connected to server %s:%s' % (host, port))
            except Exception as e:  # TimeoutError, socket.gaierror
                self.server.closed = True
                raise ProxyConnectionFailed(host, port, repr(e))

            # for http connect methods (https requests)
            # queue appropriate response for client
            # notifying about established connection
            if self.request.method == b'CONNECT':
                self.client.queue(PROXY_TUNNEL_ESTABLISHED_RESPONSE_PKT)
            # for usual http requests, re-build request packet
            # and queue for the server with appropriate headers
            else:
                self.server.queue(self.request.build(
                    del_headers=[b'proxy-authorization', b'proxy-connection', b'connection', b'keep-alive'],
                    add_headers=[(b'Via', b'1.1 proxy.py v%s' % version), (b'Connection', b'Close')]
                ))

    def _process_response(self, data):
        """Handle bytes received from the server side."""
        # parse incoming response packet
        # only for non-https requests
        if not self.request.method == b'CONNECT':
            self.response.parse(data)

        # queue data for client
        self.client.queue(data)

    def _access_log(self):
        # Emit a one-line summary of the proxied transaction.
        host, port = self.server.addr if self.server else (None, None)
        if self.request.method == b'CONNECT':
            logger.info(
                '%s:%s - %s %s:%s' % (self.client.addr[0], self.client.addr[1], self.request.method, host, port))
        elif self.request.method:
            logger.info('%s:%s - %s %s:%s%s - %s %s - %s bytes' % (
                self.client.addr[0], self.client.addr[1], self.request.method, host, port, self.request.build_url(),
                self.response.code, self.response.reason, len(self.response.raw)))

    def _get_waitable_lists(self):
        """Build select() argument lists from current connection state."""
        rlist, wlist, xlist = [self.client.conn], [], []

        if self.client.has_buffer():
            wlist.append(self.client.conn)

        if self.server and not self.server.closed:
            rlist.append(self.server.conn)

        if self.server and not self.server.closed and self.server.has_buffer():
            wlist.append(self.server.conn)

        return rlist, wlist, xlist

    def _process_wlist(self, w):
        # Flush queued bytes to whichever side is currently writable.
        if self.client.conn in w:
            logger.debug('client is ready for writes, flushing client buffer')
            self.client.flush()

        if self.server and not self.server.closed and self.server.conn in w:
            logger.debug('server is ready for writes, flushing server buffer')
            self.server.flush()

    def _process_rlist(self, r):
        """Returns True if connection to client must be closed."""
        if self.client.conn in r:
            logger.debug('client is ready for reads, reading')
            data = self.client.recv(self.client_recvbuf_size)
            self.last_activity = self._now()

            if not data:
                logger.debug('client closed connection, breaking')
                return True

            try:
                self._process_request(data)
            except (ProxyAuthenticationFailed, ProxyConnectionFailed) as e:
                logger.exception(e)
                self.client.queue(Proxy._get_response_pkt_by_exception(e))
                self.client.flush()
                return True

        if self.server and not self.server.closed and self.server.conn in r:
            logger.debug('server is ready for reads, reading')
            data = self.server.recv(self.server_recvbuf_size)
            self.last_activity = self._now()

            if not data:
                logger.debug('server closed connection')
                self.server.close()
                # Reset the request parser so a new request can follow.
                self.request = HttpParser(HttpParser.types.REQUEST_PARSER)
            else:
                self._process_response(data)

        return False

    def _process(self):
        # Main event loop: multiplex both sockets until done or inactive.
        while True:
            rlist, wlist, xlist = self._get_waitable_lists()
            r, w, x = select.select(rlist, wlist, xlist, 1)

            self._process_wlist(w)
            if self._process_rlist(r):
                break

            if self.client.buffer_size() == 0:
                if self.response.state == HttpParser.states.COMPLETE:
                    logger.debug('client buffer is empty and response state is complete, breaking')
                    break

                if self._is_inactive():
                    logger.debug('client buffer is empty and maximum inactivity has reached, breaking')
                    break

    @staticmethod
    def _get_response_pkt_by_exception(e):
        # Map proxy errors to their canned client responses.
        if e.__class__.__name__ == 'ProxyAuthenticationFailed':
            return PROXY_AUTHENTICATION_REQUIRED_RESPONSE_PKT
        if e.__class__.__name__ == 'ProxyConnectionFailed':
            return BAD_GATEWAY_RESPONSE_PKT

    def run(self):
        """Thread entry point: proxy until either side is finished."""
        logger.debug('Proxying connection %r' % self.client.conn)
        try:
            self._process()
        except KeyboardInterrupt:
            pass
        except Exception as e:
            logger.exception('Exception while handling connection %r with reason %r' % (self.client.conn, e))
        finally:
            logger.debug(
                'closing client connection with pending client buffer size %d bytes' % self.client.buffer_size())
            self.client.close()
            if self.server:
                logger.debug(
                    'closed client connection with pending server buffer size %d bytes' % self.server.buffer_size())
            self._access_log()
            logger.debug('Closing proxy for connection %r at address %r' % (self.client.conn, self.client.addr))
class TCP(object):
    """TCP server implementation.

    Subclass MUST implement `handle` method. It accepts an instance of accepted `Client` connection.
    """

    def __init__(self, hostname='127.0.0.1', port=8899, backlog=100, client_ips=None):
        self.hostname = hostname
        self.port = port
        self.backlog = backlog
        self.client_ips = client_ips  # Optional allow-list of client IPs
        self.socket = None

    def handle(self, client):
        """Process one accepted client connection (subclass hook)."""
        raise NotImplementedError()

    def run(self):
        """Bind, listen and dispatch accepted connections until error."""
        try:
            logger.info('Starting server on port %d' % self.port)
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Allow quick restarts without waiting out TIME_WAIT sockets.
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.bind((self.hostname, self.port))
            self.socket.listen(self.backlog)
            while True:
                conn, addr = self.socket.accept()
                # Reject clients not on the allow-list (when configured).
                if self.client_ips and addr[0] not in self.client_ips:
                    logger.warning('Closing socket on rejected client IP %s' % addr[0])
                    conn.shutdown(socket.SHUT_RDWR)
                    conn.close()
                    continue
                logger.info('Handling socket on accepted client IP %s' % addr[0])
                client = Client(conn, addr)
                self.handle(client)
        except Exception as e:
            logger.exception('Exception while running the server %r' % e)
        finally:
            logger.info('Closing server socket')
            # BUG FIX: self.socket may still be None if socket creation
            # itself failed; guard before closing to avoid raising
            # AttributeError from inside the finally block.
            if self.socket:
                self.socket.close()
class HTTP(TCP):
    """HTTP proxy server implementation.

    Spawns a daemon `Proxy` thread per accepted client connection.
    """

    def __init__(self, hostname='127.0.0.1', port=8899, backlog=100,
                 auth_code=None, server_recvbuf_size=8192, client_recvbuf_size=8192,
                 host_resolver=None, client_ips=None):
        super(HTTP, self).__init__(hostname, port, backlog, client_ips)
        self.auth_code = auth_code
        self.client_recvbuf_size = client_recvbuf_size
        self.server_recvbuf_size = server_recvbuf_size
        self.host_resolver = host_resolver

    def handle(self, client):
        """Serve *client* on a freshly started daemon Proxy thread."""
        worker = Proxy(client,
                       auth_code=self.auth_code,
                       server_recvbuf_size=self.server_recvbuf_size,
                       client_recvbuf_size=self.client_recvbuf_size,
                       host_resolver=self.host_resolver)
        worker.daemon = True
        worker.start()
def set_open_file_limit(soft_limit):
    """Configure open file description soft limit on supported OS."""
    if os.name == 'nt':
        # The resource module is not available on Windows.
        return
    current_soft, current_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # Only raise the soft limit, and never beyond the hard limit.
    if current_soft < soft_limit < current_hard:
        resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, current_hard))
        logger.info('Open file descriptor soft limit set to %d' % soft_limit)
def main():
    """Parse CLI arguments, configure logging, and run the proxy."""
    parser = argparse.ArgumentParser(
        description='proxy.py v%s' % __version__,
        epilog='Having difficulty using proxy.py? Report at: %s/issues/new' % __homepage__
    )
    # Numeric options are accepted as strings and converted below.
    parser.add_argument('--hostname', default='127.0.0.1', help='Default: 127.0.0.1')
    parser.add_argument('--port', default='8899', help='Default: 8899')
    parser.add_argument('--backlog', default='100', help='Default: 100. '
                        'Maximum number of pending connections to proxy server')
    parser.add_argument('--basic-auth', default=None, help='Default: No authentication. '
                        'Specify colon separated user:password '
                        'to enable basic authentication.')
    parser.add_argument('--server-recvbuf-size', default='8192', help='Default: 8 KB. '
                        'Maximum amount of data received from the '
                        'server in a single recv() operation. Bump this '
                        'value for faster downloads at the expense of '
                        'increased RAM.')
    parser.add_argument('--client-recvbuf-size', default='8192', help='Default: 8 KB. '
                        'Maximum amount of data received from the '
                        'client in a single recv() operation. Bump this '
                        'value for faster uploads at the expense of '
                        'increased RAM.')
    parser.add_argument('--open-file-limit', default='1024', help='Default: 1024. '
                        'Maximum number of files (TCP connections) '
                        'that proxy.py can open concurrently.')
    parser.add_argument('--log-level', default='INFO', help='DEBUG, INFO (default), WARNING, ERROR, CRITICAL')
    parser.add_argument('--host_resolver', default=None, help='Default: No host resolution. '
                        'JSON hosts file used for hostname IP resolution.')
    parser.add_argument('--client_ips', default=None, nargs='*', help='Default: No client IP restriction. '
                        'The only client IPs that the proxy will accept.')
    args = parser.parse_args()

    logging.basicConfig(level=getattr(logging, args.log_level),
                        format='%(asctime)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s')

    try:
        set_open_file_limit(int(args.open_file_limit))

        auth_code = None
        if args.basic_auth:
            # Pre-compute the expected Proxy-Authorization header value.
            auth_code = b'Basic %s' % base64.b64encode(bytes_(args.basic_auth))

        host_resolver = None
        if args.host_resolver:
            # Static hostname -> IP map loaded from a JSON file.
            with open(args.host_resolver) as json_file:
                host_resolver = json.load(json_file)

        proxy = HTTP(hostname=args.hostname,
                     port=int(args.port),
                     backlog=int(args.backlog),
                     auth_code=auth_code,
                     server_recvbuf_size=int(args.server_recvbuf_size),
                     client_recvbuf_size=int(args.client_recvbuf_size),
                     host_resolver=host_resolver,
                     client_ips=args.client_ips)
        proxy.run()
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
main()
|
|
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_db.sqlalchemy import migration
from six.moves import StringIO
from glance.cmd import manage
from glance.db import migration as db_migration
from glance.db.sqlalchemy import api as db_api
from glance.db.sqlalchemy import metadata as db_metadata
from glance.tests import utils as test_utils
class TestManageBase(test_utils.BaseTestCase):
    """Shared setup for glance-manage CLI tests."""

    def setUp(self):
        super(TestManageBase, self).setUp()

        def clear_conf():
            # Reset global CONF so options registered by one test run do
            # not leak into the next.
            manage.CONF.reset()
            manage.CONF.unregister_opt(manage.command_opt)
        clear_conf()
        self.addCleanup(clear_conf)

        # Silence oslo.log setup during the tests.
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_log.log.setup', lambda product_name, version='test': None))

        # Avoid touching a real database engine.
        patcher = mock.patch('glance.db.sqlalchemy.api.get_engine')
        patcher.start()
        self.addCleanup(patcher.stop)

    def _main_test_helper(self, argv, func_name=None, *exp_args, **exp_kwargs):
        # Run glance-manage with *argv* and assert *func_name* (a mock)
        # was invoked exactly once with the expected arguments.
        self.useFixture(fixtures.MonkeyPatch('sys.argv', argv))
        manage.main()
        func_name.assert_called_once_with(*exp_args, **exp_kwargs)
class TestLegacyManage(TestManageBase):
@mock.patch.object(migration, 'db_version')
def test_legacy_db_version(self, db_version):
    # 'db_version' should report the migration level, starting from 0.
    with mock.patch('sys.stdout', new_callable=StringIO):
        self._main_test_helper(['glance.cmd.manage', 'db_version'],
                               migration.db_version,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, 0)
@mock.patch.object(migration, 'db_sync')
def test_legacy_db_sync(self, db_sync):
    # 'db_sync' with no version syncs to the latest (None) revision.
    self._main_test_helper(['glance.cmd.manage', 'db_sync'],
                           migration.db_sync,
                           db_api.get_engine(),
                           db_migration.MIGRATE_REPO_PATH, None)
@mock.patch.object(migration, 'db_sync')
def test_legacy_db_upgrade(self, db_sync):
    # 'db_upgrade' is an alias for db_sync to the latest revision.
    self._main_test_helper(['glance.cmd.manage', 'db_upgrade'],
                           migration.db_sync,
                           db_api.get_engine(),
                           db_migration.MIGRATE_REPO_PATH, None)
@mock.patch.object(migration, 'db_version_control')
def test_legacy_db_version_control(self, db_version_control):
    # 'db_version_control' places the database under migration control.
    self._main_test_helper(['glance.cmd.manage', 'db_version_control'],
                           migration.db_version_control,
                           db_api.get_engine(),
                           db_migration.MIGRATE_REPO_PATH, None)
@mock.patch.object(migration, 'db_sync')
def test_legacy_db_sync_version(self, db_sync):
    # An explicit version argument is forwarded as a string.
    self._main_test_helper(['glance.cmd.manage', 'db_sync', '20'],
                           migration.db_sync,
                           db_api.get_engine(),
                           db_migration.MIGRATE_REPO_PATH, '20')
@mock.patch.object(migration, 'db_sync')
def test_legacy_db_upgrade_version(self, db_sync):
self._main_test_helper(['glance.cmd.manage', 'db_upgrade', '20'],
migration.db_sync,
db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH, '20')
@mock.patch.object(migration, 'db_sync')
def test_legacy_db_downgrade_version(self, db_sync):
self._main_test_helper(['glance.cmd.manage', 'db_downgrade', '20'],
migration.db_sync,
db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH, '20')
def test_db_metadefs_unload(self):
db_metadata.db_unload_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_unload_metadefs'],
db_metadata.db_unload_metadefs,
db_api.get_engine())
def test_db_metadefs_load(self):
db_metadata.db_load_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs'],
db_metadata.db_load_metadefs,
db_api.get_engine(),
None, None, None, None)
def test_db_metadefs_load_with_specified_path(self):
db_metadata.db_load_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs',
'/mock/'],
db_metadata.db_load_metadefs,
db_api.get_engine(),
'/mock/', None, None, None)
def test_db_metadefs_load_from_path_merge(self):
db_metadata.db_load_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs',
'/mock/', 'True'],
db_metadata.db_load_metadefs,
db_api.get_engine(),
'/mock/', 'True', None, None)
def test_db_metadefs_load_from_merge_and_prefer_new(self):
db_metadata.db_load_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs',
'/mock/', 'True', 'True'],
db_metadata.db_load_metadefs,
db_api.get_engine(),
'/mock/', 'True', 'True', None)
def test_db_metadefs_load_from_merge_and_prefer_new_and_overwrite(self):
db_metadata.db_load_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs',
'/mock/', 'True', 'True', 'True'],
db_metadata.db_load_metadefs,
db_api.get_engine(),
'/mock/', 'True', 'True', 'True')
def test_db_metadefs_export(self):
db_metadata.db_export_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs'],
db_metadata.db_export_metadefs,
db_api.get_engine(),
None)
def test_db_metadefs_export_with_specified_path(self):
db_metadata.db_export_metadefs = mock.Mock()
self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs',
'/mock/'],
db_metadata.db_export_metadefs,
db_api.get_engine(),
'/mock/')
class TestManage(TestManageBase):
    """Tests for the subcommand-style 'glance-manage db <command>' CLI."""

    @mock.patch.object(migration, 'db_version')
    def test_db_version(self, db_version):
        with mock.patch('sys.stdout', new_callable=StringIO):
            self._main_test_helper(['glance.cmd.manage', 'db', 'version'],
                                   migration.db_version,
                                   db_api.get_engine(),
                                   db_migration.MIGRATE_REPO_PATH, 0)

    @mock.patch.object(migration, 'db_sync')
    def test_db_sync(self, db_sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'sync'],
                               migration.db_sync,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, None)

    @mock.patch.object(migration, 'db_sync')
    def test_db_upgrade(self, db_sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade'],
                               migration.db_sync,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, None)

    @mock.patch.object(migration, 'db_version_control')
    def test_db_version_control(self, db_version_control):
        self._main_test_helper(['glance.cmd.manage', 'db', 'version_control'],
                               migration.db_version_control,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, None)

    @mock.patch.object(migration, 'db_sync')
    def test_db_sync_version(self, db_sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'sync', '20'],
                               migration.db_sync,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, '20')

    @mock.patch.object(migration, 'db_sync')
    def test_db_upgrade_version(self, db_sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade', '20'],
                               migration.db_sync,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, '20')

    @mock.patch.object(migration, 'db_sync')
    def test_db_downgrade_version(self, db_sync):
        self._main_test_helper(['glance.cmd.manage', 'db', 'downgrade', '20'],
                               migration.db_sync,
                               db_api.get_engine(),
                               db_migration.MIGRATE_REPO_PATH, '20')

    # NOTE: the metadef tests below previously replaced db_metadata
    # attributes with bare `db_metadata.x = mock.Mock()` assignments, which
    # leaked the Mocks into every test that ran afterwards. Using
    # mock.patch.object (like the migration tests above) restores the real
    # functions when each test finishes.
    @mock.patch.object(db_metadata, 'db_unload_metadefs')
    def test_db_metadefs_unload(self, db_unload_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'unload_metadefs'],
                               db_unload_metadefs,
                               db_api.get_engine())

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load(self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               None, False, False, False)

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load_with_specified_path(self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--path', '/mock/'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               '/mock/', False, False, False)

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load_prefer_new_with_path(self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--path', '/mock/', '--merge', '--prefer_new'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               '/mock/', True, True, False)

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load_prefer_new(self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--merge', '--prefer_new'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               None, True, True, False)

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load_overwrite_existing(self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--merge', '--overwrite'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               None, True, False, True)

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load_prefer_new_and_overwrite_existing(
            self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--merge', '--prefer_new', '--overwrite'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               None, True, True, True)

    @mock.patch.object(db_metadata, 'db_load_metadefs')
    def test_db_metadefs_load_from_path_overwrite_existing(
            self, db_load_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs',
                                '--path', '/mock/', '--merge', '--overwrite'],
                               db_load_metadefs,
                               db_api.get_engine(),
                               '/mock/', True, False, True)

    @mock.patch.object(db_metadata, 'db_export_metadefs')
    def test_db_metadefs_export(self, db_export_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs'],
                               db_export_metadefs,
                               db_api.get_engine(),
                               None)

    @mock.patch.object(db_metadata, 'db_export_metadefs')
    def test_db_metadefs_export_with_specified_path(self,
                                                    db_export_metadefs):
        self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs',
                                '--path', '/mock/'],
                               db_export_metadefs,
                               db_api.get_engine(),
                               '/mock/')
|
|
import datetime
import warnings
from django.forms.utils import flatatt, pretty_name
from django.forms.widgets import Textarea, TextInput
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.inspect import func_supports_parameter
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
__all__ = ('BoundField',)
@html_safe
class BoundField:
    """
    A Field plus its bound data.

    Wraps a form field together with the form it belongs to and the data
    (or initial value) for that field, and knows how to render the field
    as an HTML widget, label, etc.
    """
    def __init__(self, form, field, name):
        self.form = form
        self.field = field
        self.name = name
        # Name as rendered in HTML, including the form prefix.
        self.html_name = form.add_prefix(name)
        # Name/id used for the hidden "initial" input when
        # show_hidden_initial is set on the field.
        self.html_initial_name = form.add_initial_prefix(name)
        self.html_initial_id = form.add_initial_prefix(self.auto_id)
        if self.field.label is None:
            # Derive a human-readable label from the field name.
            self.label = pretty_name(name)
        else:
            self.label = self.field.label
        self.help_text = field.help_text or ''

    def __str__(self):
        """Render this field as an HTML widget."""
        if self.field.show_hidden_initial:
            return self.as_widget() + self.as_hidden(only_initial=True)
        return self.as_widget()

    @cached_property
    def subwidgets(self):
        """
        Most widgets yield a single subwidget, but others like RadioSelect and
        CheckboxSelectMultiple produce one subwidget for each choice.

        This property is cached so that only one database query occurs when
        rendering ModelChoiceFields.
        """
        id_ = self.field.widget.attrs.get('id') or self.auto_id
        attrs = {'id': id_} if id_ else {}
        attrs = self.build_widget_attrs(attrs)
        return list(
            BoundWidget(self.field.widget, widget, self.form.renderer)
            for widget in self.field.widget.subwidgets(self.html_name, self.value(), attrs=attrs)
        )

    def __bool__(self):
        # BoundField evaluates to True even if it doesn't have subwidgets.
        return True

    def __iter__(self):
        return iter(self.subwidgets)

    def __len__(self):
        return len(self.subwidgets)

    def __getitem__(self, idx):
        # Prevent unnecessary reevaluation when accessing BoundField's attrs
        # from templates.
        if not isinstance(idx, (int, slice)):
            raise TypeError
        return self.subwidgets[idx]

    @property
    def errors(self):
        """
        Return an ErrorList (empty if there are no errors) for this field.
        """
        return self.form.errors.get(self.name, self.form.error_class())

    def as_widget(self, widget=None, attrs=None, only_initial=False):
        """
        Render the field by rendering the passed widget, adding any HTML
        attributes passed as attrs. If a widget isn't specified, use the
        field's default widget.
        """
        if not widget:
            widget = self.field.widget

        if self.field.localize:
            widget.is_localized = True

        attrs = attrs or {}
        attrs = self.build_widget_attrs(attrs, widget)
        auto_id = self.auto_id
        # Only auto-assign an id if neither the caller nor the widget
        # supplied one explicitly.
        if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
            if not only_initial:
                attrs['id'] = auto_id
            else:
                attrs['id'] = self.html_initial_id

        # The hidden "initial" copy of the field renders under a separate
        # name so bound data and initial data can be compared on submit.
        if not only_initial:
            name = self.html_name
        else:
            name = self.html_initial_name

        kwargs = {}
        # Transitional: third-party widgets may not accept `renderer` yet.
        if func_supports_parameter(widget.render, 'renderer'):
            kwargs['renderer'] = self.form.renderer
        else:
            warnings.warn(
                'Add the `renderer` argument to the render() method of %s. '
                'It will be mandatory in Django 2.1.' % widget.__class__,
                RemovedInDjango21Warning, stacklevel=2,
            )
        return widget.render(
            name=name,
            value=self.value(),
            attrs=attrs,
            **kwargs
        )

    def as_text(self, attrs=None, **kwargs):
        """
        Return a string of HTML for representing this as an <input type="text">.
        """
        return self.as_widget(TextInput(), attrs, **kwargs)

    def as_textarea(self, attrs=None, **kwargs):
        """Return a string of HTML for representing this as a <textarea>."""
        return self.as_widget(Textarea(), attrs, **kwargs)

    def as_hidden(self, attrs=None, **kwargs):
        """
        Return a string of HTML for representing this as an <input type="hidden">.
        """
        return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)

    @property
    def data(self):
        """
        Return the data for this BoundField, or None if it wasn't given.
        """
        return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)

    def value(self):
        """
        Return the value for this BoundField, using the initial value if
        the form is not bound or the data otherwise.
        """
        data = self.initial
        if self.form.is_bound:
            data = self.field.bound_data(self.data, data)
        return self.field.prepare_value(data)

    def label_tag(self, contents=None, attrs=None, label_suffix=None):
        """
        Wrap the given contents in a <label>, if the field has an ID attribute.
        contents should be mark_safe'd to avoid HTML escaping. If contents
        aren't given, use the field's HTML-escaped label.

        If attrs are given, use them as HTML attributes on the <label> tag.

        label_suffix overrides the form's label_suffix.
        """
        contents = contents or self.label
        if label_suffix is None:
            # Field-level suffix wins over the form-level one.
            label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
                            else self.form.label_suffix)
        # Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix to be appended to the label
        if label_suffix and contents and contents[-1] not in _(':?.!'):
            contents = format_html('{}{}', contents, label_suffix)
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        if id_:
            id_for_label = widget.id_for_label(id_)
            if id_for_label:
                attrs = dict(attrs or {}, **{'for': id_for_label})
            if self.field.required and hasattr(self.form, 'required_css_class'):
                attrs = attrs or {}
                if 'class' in attrs:
                    attrs['class'] += ' ' + self.form.required_css_class
                else:
                    attrs['class'] = self.form.required_css_class
            attrs = flatatt(attrs) if attrs else ''
            contents = format_html('<label{}>{}</label>', attrs, contents)
        else:
            # No id available: return the (escaped) label text without a tag.
            contents = conditional_escape(contents)
        return mark_safe(contents)

    def css_classes(self, extra_classes=None):
        """
        Return a string of space-separated CSS classes for this field.

        extra_classes may be a space-separated string or an iterable of
        class names to include in addition to the error/required classes.
        """
        if hasattr(extra_classes, 'split'):
            extra_classes = extra_classes.split()
        extra_classes = set(extra_classes or [])
        if self.errors and hasattr(self.form, 'error_css_class'):
            extra_classes.add(self.form.error_css_class)
        if self.field.required and hasattr(self.form, 'required_css_class'):
            extra_classes.add(self.form.required_css_class)
        return ' '.join(extra_classes)

    @property
    def is_hidden(self):
        """Return True if this BoundField's widget is hidden."""
        return self.field.widget.is_hidden

    @property
    def auto_id(self):
        """
        Calculate and return the ID attribute for this BoundField, if the
        associated Form has specified auto_id. Return an empty string otherwise.
        """
        auto_id = self.form.auto_id
        # auto_id may be a format string ('id_%s') or a truthy flag.
        if auto_id and '%s' in force_text(auto_id):
            return force_text(auto_id) % self.html_name
        elif auto_id:
            return self.html_name
        return ''

    @property
    def id_for_label(self):
        """
        Wrapper around the field widget's `id_for_label` method.
        Useful, for example, for focusing on this field regardless of whether
        it has a single widget or a MultiWidget.
        """
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        return widget.id_for_label(id_)

    @cached_property
    def initial(self):
        data = self.form.get_initial_for_field(self.field, self.name)
        # If this is an auto-generated default date, nix the microseconds for
        # standardized handling. See #22502.
        if (isinstance(data, (datetime.datetime, datetime.time)) and
                not self.field.widget.supports_microseconds):
            data = data.replace(microsecond=0)
        return data

    def build_widget_attrs(self, attrs, widget=None):
        """Return a copy of *attrs* augmented with required/disabled flags."""
        if not widget:
            widget = self.field.widget
        attrs = dict(attrs)  # Copy attrs to avoid modifying the argument.
        if widget.use_required_attribute(self.initial) and self.field.required and self.form.use_required_attribute:
            attrs['required'] = True
        if self.field.disabled:
            attrs['disabled'] = True
        return attrs
@html_safe
class BoundWidget:
    """
    Container used when iterating over a BoundField's subwidgets — handy for
    widgets with choices, e.g. in a template:

        {% for radio in myform.beatles %}
            <label for="{{ radio.id_for_label }}">
                {{ radio.choice_label }}
                <span class="radio">{{ radio.tag }}</span>
            </label>
        {% endfor %}
    """
    def __init__(self, parent_widget, data, renderer):
        self.parent_widget = parent_widget
        self.data = data
        self.renderer = renderer

    def __str__(self):
        return self.tag(wrap_label=True)

    def tag(self, wrap_label=False):
        """Render this subwidget, optionally wrapped in its label."""
        return self.parent_widget._render(
            self.template_name,
            {'widget': self.data, 'wrap_label': wrap_label},
            self.renderer,
        )

    @property
    def template_name(self):
        # A per-subwidget template (if any) overrides the parent widget's.
        return self.data.get('template_name', self.parent_widget.template_name)

    @property
    def id_for_label(self):
        return 'id_%s_%s' % (self.data['name'], self.data['index'])

    @property
    def choice_label(self):
        return self.data['label']
|
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
from typing import List, Optional, Tuple
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial§ion=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial§ion=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
# Newest symbol versions accepted for imported symbols, per library
# (see the distro-support analysis above).
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': (2,17),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
# Toolchain binaries used for binary inspection; overridable via environment.
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# bitcoin-qt only
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
# Baseline glibc symbol version implied by each ELF machine type; used as a
# fallback floor in check_version().
ARCH_MIN_GLIBC_VER = {
'80386': (2,1),
'X86-64': (2,2,5),
'ARM': (2,4),
'AArch64':(2,17),
'RISC-V': (2,27)
}
# Allowed dylibs/frameworks for Mach-O binaries (otool -L output).
MACHO_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# bitcoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'libobjc.A.dylib', # Objective-C runtime library
'Metal', # 3D graphics
'Security', # access control and authentication
'QuartzCore', # animation
}
# Allowed DLL imports for Windows PE binaries (objdump -x output).
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# bitcoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
}
class CPPFilt(object):
    '''
    Demangle C++ symbol names.

    Keeps one long-lived pipe to the 'c++filt' command and feeds it one
    mangled name per line.
    '''
    def __init__(self):
        self.proc = subprocess.Popen(
            CPPFILT_CMD,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True)

    def __call__(self, mangled):
        # Write one mangled name, read back exactly one demangled line.
        self.proc.stdin.write(mangled + '\n')
        self.proc.stdin.flush()
        demangled = self.proc.stdout.readline()
        return demangled.rstrip()

    def close(self):
        # Close both pipe ends before reaping the helper process.
        self.proc.stdin.close()
        self.proc.stdout.close()
        self.proc.wait()
def read_symbols(executable, imports=True) -> List[Tuple[str, str, str]]:
    '''
    Parse an ELF executable and return a list of (symbol,version, arch) tuples
    for dynamic, imported symbols.

    With imports=False, exported (defined) symbols are returned instead.
    The arch value comes from the 'Machine:' line of the readelf -h header.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for {}: {}'.format(executable, stderr.strip()))
    syms = []
    for line in stdout.splitlines():
        line = line.split()
        # Capture the machine type from the '-h' header output.
        # NOTE(review): assumes readelf prints the header before any symbol
        # rows, otherwise `arch` would be unbound below -- confirm.
        if 'Machine:' in line:
            arch = line[-1]
        # Symbol-table rows start with an index token like '123:'.
        if len(line)>7 and re.match('[0-9]+:$', line[0]):
            (sym, _, version) = line[7].partition('@')
            is_import = line[6] == 'UND'
            # Default-versioned symbols appear as 'sym@@VER'; partition on
            # the first '@' leaves a leading '@' on the version — strip it.
            if version.startswith('@'):
                version = version[1:]
            if is_import == imports:
                syms.append((sym, version, arch))
    return syms
def check_version(max_versions, version, arch) -> bool:
    '''
    Return True if a symbol version string like 'GLIBC_2.17' is at or below
    the maximum allowed for its library in *max_versions* (or, for GLIBC,
    at or below the architecture's baseline in ARCH_MIN_GLIBC_VER).
    '''
    # Split 'LIB_x.y.z' into library name and dotted version; a bare name
    # counts as version 0.
    if '_' in version:
        (lib, _, ver) = version.rpartition('_')
    else:
        lib = version
        ver = '0'
    ver = tuple([int(x) for x in ver.split('.')])
    # Fixed idiom: 'lib not in' instead of 'not lib in'.
    if lib not in max_versions:
        return False
    # Parenthesized for clarity; relies on tuple comparison semantics.
    return ver <= max_versions[lib] or (lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch])
def elf_read_libraries(filename) -> List[str]:
    '''Return the (NEEDED) shared-library names of an ELF file via readelf -d.'''
    proc = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = proc.communicate()
    if proc.returncode:
        raise IOError('Error opening file')
    needed = []
    for line in stdout.splitlines():
        tokens = line.split()
        # Only dynamic-section rows tagged (NEEDED) are of interest.
        if len(tokens) <= 2 or tokens[1] != '(NEEDED)':
            continue
        match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
        if not match:
            raise ValueError('Unparseable (NEEDED) specification')
        needed.append(match.group(1))
    return needed
def check_imported_symbols(filename) -> bool:
    '''
    Check that every versioned symbol imported by *filename* is within the
    allowed MAX_VERSIONS bounds. Returns True when all imports are OK.
    '''
    cppfilt = CPPFilt()
    try:
        ok = True
        for sym, version, arch in read_symbols(filename, True):
            if version and not check_version(MAX_VERSIONS, version, arch):
                print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
                ok = False
        return ok
    finally:
        # BUG FIX: previously the c++filt helper process was never closed,
        # leaking a subprocess and two pipes per call.
        cppfilt.close()
def check_exported_symbols(filename) -> bool:
    '''
    Check that *filename* exports no symbols besides the standard ones in
    IGNORE_EXPORTS. Returns True when no unexpected exports are found.
    '''
    cppfilt = CPPFilt()
    try:
        ok = True
        for sym, version, arch in read_symbols(filename, False):
            # RISC-V binaries are exempt from the export check.
            if arch == 'RISC-V' or sym in IGNORE_EXPORTS:
                continue
            print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
            ok = False
        return ok
    finally:
        # BUG FIX: previously the c++filt helper process was never closed,
        # leaking a subprocess and two pipes per call.
        cppfilt.close()
def check_ELF_libraries(filename) -> bool:
    '''Return True when every NEEDED library is in ELF_ALLOWED_LIBRARIES.'''
    offenders = [lib for lib in elf_read_libraries(filename)
                 if lib not in ELF_ALLOWED_LIBRARIES]
    for library_name in offenders:
        print('{}: NEEDED library {} is not allowed'.format(filename, library_name))
    return not offenders
def macho_read_libraries(filename) -> List[str]:
    '''
    Return the dylib/framework basenames linked by a Mach-O binary,
    as reported by `otool -L`.
    '''
    p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    libraries = []
    for line in stdout.splitlines():
        tokens = line.split()
        if len(tokens) == 1: # skip executable name
            continue
        # Keep only the basename of the install path.
        libraries.append(tokens[0].split('/')[-1])
    return libraries
def check_MACHO_libraries(filename) -> bool:
    '''Return True when every linked dylib is in MACHO_ALLOWED_LIBRARIES.'''
    offenders = [dylib for dylib in macho_read_libraries(filename)
                 if dylib not in MACHO_ALLOWED_LIBRARIES]
    for dylib in offenders:
        print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
    return not offenders
def pe_read_libraries(filename) -> List[str]:
    '''
    Return the DLL names imported by a Windows PE binary, as reported by
    `objdump -x`.
    '''
    p = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    libraries = []
    for line in stdout.splitlines():
        # Import-table entries appear as lines like '  DLL Name: KERNEL32.dll'.
        if 'DLL Name:' in line:
            tokens = line.split(': ')
            libraries.append(tokens[1])
    return libraries
def check_PE_libraries(filename) -> bool:
    '''Return True when every imported DLL is in PE_ALLOWED_LIBRARIES.'''
    offenders = [dylib for dylib in pe_read_libraries(filename)
                 if dylib not in PE_ALLOWED_LIBRARIES]
    for dylib in offenders:
        print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
    return not offenders
# Checks to run per executable format, keyed by the string returned from
# identify_executable(). Each entry is a (check name, check function) pair;
# a check returns False on failure.
CHECKS = {
'ELF': [
    ('IMPORTED_SYMBOLS', check_imported_symbols),
    ('EXPORTED_SYMBOLS', check_exported_symbols),
    ('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
    ('DYNAMIC_LIBRARIES', check_MACHO_libraries)
],
'PE' : [
    ('DYNAMIC_LIBRARIES', check_PE_libraries)
]
}
def identify_executable(executable) -> Optional[str]:
    '''
    Sniff the executable format from its leading magic bytes.

    Returns 'PE', 'ELF' or 'MACHO', or None for unrecognized formats.
    '''
    # BUG FIX: the original opened the global `filename` instead of the
    # `executable` parameter, so it only worked by accident when called
    # from the __main__ loop below.
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    elif magic.startswith(b'\x7fELF'):
        return 'ELF'
    elif magic.startswith(b'\xcf\xfa'):
        return 'MACHO'
    return None
if __name__ == '__main__':
    # Exit status is non-zero if any input file fails any applicable check.
    retval = 0
    for filename in sys.argv[1:]:
        try:
            etype = identify_executable(filename)
            if etype is None:
                print('{}: unknown format'.format(filename))
                retval = 1
                continue
            # Run every check registered for this executable format and
            # collect the names of the ones that failed.
            failed = []
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    failed.append(name)
            if failed:
                print('{}: failed {}'.format(filename, ' '.join(failed)))
                retval = 1
        except IOError:
            print('{}: cannot open'.format(filename))
            retval = 1
    sys.exit(retval)
|
|
import re
# Canonical unit names used throughout this module.
UNITS = [
    'GRAM',
    'KILOGRAM',
    'OUNCE',
    'POUND',
    'TEASPOON',
    'TABLESPOON',
    'GALLON',
    'LITER'
]

# Common abbreviations/aliases mapped to canonical UNITS entries.
# Case matters for the ambiguous ones: 't' is teaspoon, 'T' tablespoon;
# 'g' is gram, 'G' gallon; 'l'/'L' are liter.
UNIT_MAP = {
    'lb': 'POUND',
    'Lb': 'POUND',
    'LB': 'POUND',
    'oz': 'OUNCE',
    'Oz': 'OUNCE',
    'OZ': 'OUNCE',
    'g': 'GRAM',
    'kg': 'KILOGRAM',
    't': 'TEASPOON',
    'ts': 'TEASPOON',
    'tsp': 'TEASPOON',
    'Tsp': 'TEASPOON',
    'tspn': 'TEASPOON',
    'tbs': 'TABLESPOON',
    'tbsp': 'TABLESPOON',
    'tblsp': 'TABLESPOON',
    'tblspn': 'TABLESPOON',
    'Tbsp': 'TABLESPOON',
    'T': 'TABLESPOON',
    'G': 'GALLON',
    'gal': 'GALLON',
    'Gal': 'GALLON',
    'l': 'LITER',
    'L': 'LITER'
}
def to_num(value):
    """
    Coerce a string to a "Number". Adapted from
    ``formencode.validators.Number``.

    Returns an int when the value is integral, a float otherwise, and
    None when the value is not numeric at all.
    """
    try:
        as_float = float(value)
        try:
            as_int = int(as_float)
        except OverflowError:
            # e.g. float('inf') cannot be converted to int.
            as_int = None
        return as_int if as_float == as_int else as_float
    except ValueError:
        # Covers both non-numeric strings and int(float('nan')).
        return None
class UnitException(Exception):
    """Base class for errors raised while parsing or converting units."""
    pass
class InvalidUnitException(UnitException):
    """Raised when a unit token cannot be mapped to a canonical unit."""
    pass
class PoundOunceMerge(object):
    """Merge a parsed (pounds, ounces) pair into a single POUND amount."""
    # Matched against the parsed unit-name list by UnitConvert.from_str.
    signature = ["POUND", "OUNCE"]

    @classmethod
    def merge(cls, pounds, ounces):
        """
        ``pounds`` and ``ounces`` are (amount, unit) pairs; returns
        (total_pounds, 'POUND').
        """
        # Float divisor so integer amounts don't truncate under Python 2
        # integer division (16 oz per lb).
        return (pounds[0] + (ounces[0] / 16.0), 'POUND')
class OunceMerge(object):
    """Convert a lone OUNCE amount to POUND."""
    # Matched against the parsed unit-name list by UnitConvert.from_str.
    signature = ["OUNCE"]

    @classmethod
    def merge(cls, ounces):
        """``ounces`` is an (amount, unit) pair; returns (pounds, 'POUND')."""
        # Float divisor so integer amounts don't truncate under Python 2
        # integer division (16 oz per lb).
        return (ounces[0] / 16.0, 'POUND')
class GramMerge(object):
    """Convert a lone GRAM amount to POUND."""
    # Matched against the parsed unit-name list by UnitConvert.from_str.
    signature = ["GRAM"]

    @classmethod
    def merge(cls, grams):
        """``grams`` is an (amount, unit) pair; returns (pounds, 'POUND')."""
        # 453.59237 grams per pound.
        amount = grams[0] / 453.59237
        return (amount, "POUND")
class KilogramMerge(object):
    """Convert a lone KILOGRAM amount to POUND."""
    # Matched against the parsed unit-name list by UnitConvert.from_str.
    signature = ["KILOGRAM"]

    @classmethod
    def merge(cls, kilograms):
        """``kilograms`` is an (amount, unit) pair; returns (pounds, 'POUND')."""
        # 0.45359237 kilograms per pound.
        amount = kilograms[0] / .45359237
        return (amount, "POUND")
class PoundExpansion(object):
    """Expand a decimal POUND amount into whole pounds and ounces."""
    # Matched against a single unit name by UnitConvert.to_str.
    signature = "POUND"

    @classmethod
    def expand(cls, pounds):
        """
        Attempt to expand POUND units into whole POUND, OUNCE units, e.g.,

            5.5 lb == 5 lb, 8 oz

        If the unit is less than a pound, just use ounce increments.
        """
        # Whole pounds pass through untouched.
        if int(pounds) == pounds:
            return [(pounds, "POUND")]

        # Below one pound, express everything in ounces.
        if pounds < 1:
            return [(pounds * 16, "OUNCE")]

        # There are 16 oz per lb: if the fractional part corresponds
        # exactly to a whole number of ounces, split into lb + oz.
        fraction = pounds - int(pounds)
        for ounce in range(1, 17):
            if fraction * 16 == ounce:
                return [(int(pounds), "POUND"), (ounce, "OUNCE")]

        # No exact ounce fraction found: fall back to decimal pounds.
        return [(pounds, "POUND")]
class UnitConvert(object):
    """
    Used to convert from strings to units, e.g.,

        from_str("5lb 8oz") == (5.5, 'POUND')

    ...and back again:

        to_str(5.5, 'POUND') == "5 lb 8 oz"
    """
    # Strip anything that is not a digit, ASCII letter, period or whitespace.
    # BUG FIX: the original pattern used the range 'A-z', which also spans
    # '[', '\\', ']', '^', '_' and '`', letting those through the strip.
    punctuationRe = re.compile(r'[^0-9a-zA-Z.\s]')
    # A unit token: letters, optionally followed by " ." (a period that got
    # separated from a decimal amount, e.g. "2.5lb. 6oz").
    unitRe = re.compile(r'[a-zA-Z]+( \.)?')

    @classmethod
    def __pairs__(cls, val):
        """
        Convert a unit string into a list of (amount, unit-name) pairs:

            '2.5lb. 6oz' --> [(2.5, 'POUND'), (6.0, 'OUNCE')]

        Input should already be stripped of punctuation characters.
        """
        # Find all unit amounts. Materialize into lists: on Python 3 the
        # original code's filter() iterators were consumed by the join()
        # below and then reused empty.
        amounts = [a for a in cls.unitRe.split(val) if a and a != '.']
        # Build a regex that matches the literal amount strings.
        partsRe = '(%s)' % '|'.join(re.escape(a) for a in amounts)
        # Split on the amounts and keep only the remainder: the unit tokens.
        parts = re.compile(partsRe).split(val)
        units = [u for u in parts if u and u not in amounts]
        # Coerce values into a more usable format.
        amounts = cls.__coerce_amounts__(amounts)
        units = cls.__coerce_units__(units)
        # list() so callers can index the result on Python 3.
        return list(zip(amounts, units))

    @classmethod
    def __coerce_amounts__(cls, amounts):
        """Cast amount strings to float, rejoining split decimals ('. 6')."""
        return [float(a.replace('. ', '')) for a in amounts]

    @classmethod
    def __coerce_units__(cls, units):
        """
        Normalize raw unit tokens ('lb', 'ounces') to canonical UNITS names.

        Raises InvalidUnitException when a token cannot be normalized.
        """
        # Filter all punctuation from the units.
        units = [re.compile('[^a-zA-Z]').sub('', u) for u in units]

        # Attempt to standardize units.
        def unitize(unit):
            # Tokens reduced to nothing by the strip above are invalid
            # (previously this crashed with an IndexError on unit[-1]).
            if not unit:
                raise InvalidUnitException('`%s` is not a valid unit' % unit)
            coerced = None
            # Look for exact matches.
            if unit.upper() in UNITS:
                coerced = unit.upper()
            # Look for alias matches.
            if unit in UNIT_MAP:
                coerced = UNIT_MAP[unit]
            # Look for simple matches on plurality.
            if unit[-1] == 's' and unit[:-1].upper() in UNITS:
                coerced = unit[:-1].upper()
            # Look for simple alias matches on plurality.
            if unit[-1] == 's' and unit[:-1] in UNIT_MAP:
                coerced = UNIT_MAP[unit[:-1]]
            if coerced not in UNITS:
                raise InvalidUnitException('`%s` is not a valid unit' % unit)
            return coerced

        return [unitize(u) for u in units]

    @classmethod
    def from_str(cls, val):
        """
        Parse a human-entered amount string.

        Returns (number, None) for bare numeric strings, otherwise a single
        (amount, unit) pair, merged across units where possible.
        """
        stripped = cls.punctuationRe.sub('', val)
        #
        # First attempt to interpret a simple int/float
        # value and return it. If this fails, continue on
        # to normal "<amount> <unit>" parsing.
        #
        coerced = to_num(stripped)
        if coerced is not None:
            return coerced, None
        #
        # Split into pairs of (amount, unit), e.g.,
        # [(5.0, 'POUND'), (8.0, 'OUNCE')]
        #
        pairs = cls.__pairs__(stripped)
        #
        # Now that we have a list of potential
        # unit/amount pairs, attempt to combine
        # them into a single unit that makes sense, e.g.,
        #
        # [(5.0, 'POUND'), (8.0, 'OUNCE')] == (5.5, 'POUND')
        #
        units = [p[1] for p in pairs]
        for mergecls in [
            PoundOunceMerge,
            OunceMerge,
            GramMerge,
            KilogramMerge
        ]:
            if mergecls.signature == units:
                return mergecls.merge(*pairs)
        return pairs[0]

    @classmethod
    def __str_abbr__(cls, unit):
        """
        Abbreviate standard units, e.g.,

            "POUND" -> "lb"

        Unknown units abbreviate to the empty string.
        """
        unit = str(unit)
        _map = {
            'GRAM': 'g',
            'KILOGRAM': 'kg',
            'OUNCE': 'oz',
            'POUND': 'lb',
            'TEASPOON': 't',
            'TABLESPOON': 'T',
            'GALLON': 'Gal',
            'LITER': 'L'
        }
        return _map.get(unit, '')

    @classmethod
    def __str_amount__(cls, amount):
        """
        Format amounts for readability, e.g.,

            5.0 -> 5
        """
        if amount == int(amount):
            amount = int(amount)
        amount = '%.3f' % amount
        # Remove padded zeroes.
        while amount.endswith('0'):
            amount = amount[:-1]
        # Remove trailing decimal points.
        if amount.endswith('.'):
            amount = amount[:-1]
        return amount

    @classmethod
    def to_str(cls, amount, unit):
        """
        Render an (amount, unit) pair as a human-readable string, expanding
        pounds into 'X lb Y oz' where possible.
        """
        #
        # If there's no unit, just
        # return the amount
        #
        if unit is None:
            if type(amount) is float and int(amount) == amount:
                amount = int(amount)
            return str(amount)
        pairs = [(amount, unit)]
        for expandcls in [
            PoundExpansion
        ]:
            if expandcls.signature == unit:
                pairs = expandcls.expand(amount)
        result = ' '.join([
            '%s %s' % (cls.__str_amount__(amount), cls.__str_abbr__(unit))
            for amount, unit in pairs if amount
        ])
        #
        # If result is an empty string,
        # we filtered out all of the "zero"
        # ingredients, leaving nothing.
        #
        # This can happen in circumstances like
        # (0, 'POUND'). This scenario is
        # special cased.
        #
        if result == '':
            return '0 %s' % cls.__str_abbr__(unit)
        return result
def to_us(amount, unit):
    """
    Used to convert metric (amount, unit) pairs to U.S. versions, e.g.,

        (18.9270589, 'LITER') -> (5, 'GALLON')

    Unknown units pass through unchanged.
    """
    conversions = {
        'KILOGRAM': (2.20462262, 'POUND'),
        'GRAM': (0.0352739619, 'OUNCE'),
        'LITER': (0.264172052, 'GALLON'),
    }
    if unit in conversions:
        factor, us_unit = conversions[unit]
        return (amount * factor, us_unit)
    return (amount, unit)
def to_metric(amount, unit):
    """
    Convert a common U.S. (amount, unit) pair to its metric
    equivalent, e.g., (5, 'GALLON') -> (18.9270589, 'LITER').

    Pounds become kilograms, falling back to grams when the result is
    under one kilogram. Unrecognized units pass through unchanged.
    """
    if unit == 'POUND':
        kilograms = amount * 0.45359237
        if kilograms < 1.0:
            return (kilograms * 1000, 'GRAM')
        return (kilograms, 'KILOGRAM')
    factors = {
        'OUNCE': (28.3495231, 'GRAM'),
        'GALLON': (3.78541178, 'LITER'),
        'TEASPOON': (0.00492892159, 'LITER'),
        'TABLESPOON': (0.0147867648, 'LITER'),
    }
    if unit in factors:
        factor, metric_unit = factors[unit]
        return (amount * factor, metric_unit)
    return (amount, unit)
def to_kg(amount, unit):
    """Reduce a weight (amount, unit) pair to a bare amount in kilograms."""
    converted, metric_unit = to_metric(amount, unit)
    # to_metric may answer in grams for sub-kilogram weights.
    if metric_unit == 'GRAM':
        return converted / 1000.00
    return converted
def to_l(amount, unit):
    """Reduce a volume (amount, unit) pair to a bare amount in liters.

    OUNCE is converted at 0.0295735296 L per ounce here (presumably a
    fluid-ounce rate, unlike the weight conversion to_metric performs
    for the same unit name — TODO confirm with callers).
    """
    if unit == 'OUNCE':
        return amount * 0.0295735296
    converted, _ = to_metric(amount, unit)
    return converted
|
|
"""Define random number Type (`RandomStateType`) and Op (`RandomFunction`)."""
from __future__ import absolute_import, print_function, division
import sys
from copy import copy
import numpy
from six import string_types
from six.moves import reduce, xrange
# local imports
import theano
from theano import tensor
from theano.tensor import opt
from theano import gof
from theano.compile import optdb
__docformat__ = "restructuredtext en"
class RandomStateType(gof.Type):
    """
    A Type wrapper for numpy.random.RandomState.

    The reason this exists (and `Generic` doesn't suffice) is that
    RandomState objects that would appear to be equal do not compare
    equal with the '==' operator. This Type exists to provide an equals
    function that is used by DebugMode.
    """
    def __str__(self):
        return 'RandomStateType'
    def filter(self, data, strict=False, allow_downcast=None):
        # Values are accepted untouched; invalid ones are rejected with
        # TypeError, as the Type contract requires.
        if not self.is_valid_value(data):
            raise TypeError()
        return data
    def is_valid_value(self, a):
        # Exact type check — subclasses are deliberately not accepted.
        return type(a) == numpy.random.RandomState
    def values_eq(self, a, b):
        state_a = a.get_state()
        state_b = b.get_state()
        # Element 0: algorithm id (should always be 'MT19937').
        if state_a[0] != state_b[0]:
            return False
        # Element 1: the 624-entry unsigned integer key array.
        if not numpy.all(state_a[1] == state_b[1]):
            return False
        # Element 2: integer "pos" within the key array.
        if state_a[2] != state_b[2]:
            return False
        # Element 3: integer "has_gauss" flag.
        if state_a[3] != state_b[3]:
            return False
        # Element 4: "cached_gaussian" is uninitialized when
        # has_gauss == 0, so it only takes part when the flag is set.
        return state_a[3] == 0 or state_a[4] == state_b[4]
    def get_shape_info(self, obj):
        return None
    def get_size(self, shape_info):
        # The state has a constant layout, so its size can be measured
        # from a freshly constructed RandomState.
        size = 0
        for element in numpy.random.RandomState().get_state():
            if isinstance(element, str):
                size += len(element)
            elif isinstance(element, numpy.ndarray):
                size += element.size * element.itemsize
            elif isinstance(element, int):
                size += numpy.dtype("int").itemsize
            elif isinstance(element, float):
                size += numpy.dtype("float").itemsize
            else:
                raise NotImplementedError()
        return size
    @staticmethod
    def may_share_memory(a, b):
        return a is b
# Register RandomStateType's C code for ViewOp.
theano.compile.register_view_op_c_code(
RandomStateType,
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
1)
random_state_type = RandomStateType()
class RandomFunction(gof.Op):
    """
    Op that draws random numbers from a numpy.random.RandomState object.
    Parameters
    ----------
    fn : string or function reference
        A member function of numpy.random.RandomState. A string will
        be interpreted as the name of a member function of
        numpy.random.RandomState.
        Technically, any function with a signature like the ones in
        numpy.random.RandomState will do. This function must accept
        the shape (sometimes called size) of the output as the last
        positional argument.
    outtype
        The theano Type of the output.
    args
        A list of default arguments for the function
    kwargs
        If the 'inplace' key is there, its value will be used to
        determine if the op operates inplace or not.
        If the 'ndim_added' key is there, its value indicates how
        many more dimensions this op will add to the output, in
        addition to the shape's dimensions (used in multinomial and
        permutation).
    """
    __props__ = ("fn", "outtype", "inplace", "ndim_added")
    def __init__(self, fn, outtype, inplace=False, ndim_added=0):
        # Construction is routed through __setstate__ so that building a
        # fresh op and unpickling one share a single code path.
        self.__setstate__([fn, outtype, inplace, ndim_added])
    def __getstate__(self):
        # exec_fn may be an unpicklable callable; it is re-derived from
        # self.fn in __setstate__. destroy_map is likewise recomputed.
        d = dict(self.__dict__)
        del d['exec_fn']
        if 'destroy_map' in d:
            del d['destroy_map']
        return d
    def __setstate__(self, dct):
        # Accepts either a pickled __dict__ (dict) or the
        # [fn, outtype, inplace, ndim_added] list passed by __init__.
        if isinstance(dct, dict):
            state = [dct['fn'],
                     dct['outtype'],
                     dct['inplace'],
                     dct['ndim_added']]
            self.__dict__.update(dct)
        else:
            state = dct
        fn, outtype, inplace, ndim_added = state
        self.fn = fn
        # A string names a RandomState method; any other callable is
        # used directly (it receives the RandomState as first argument).
        if isinstance(fn, string_types):
            self.exec_fn = getattr(numpy.random.RandomState, fn)
        else:
            self.exec_fn = fn
        self.outtype = outtype
        self.inplace = inplace
        if self.inplace:
            # Running inplace destroys input 0 (the RandomState).
            self.destroy_map = {0: [0]}
        self.ndim_added = ndim_added
    def __str__(self):
        return 'RandomFunction{%s}' % self.exec_fn.__name__
    def make_node(self, r, shape, *args):
        """
        Parameters
        ----------
        r
            A numpy.random.RandomState instance, or a Variable of Type
            RandomStateType that will contain a RandomState instance.
        shape
            An lvector with a shape defining how many samples
            to draw. In the case of scalar distributions, it is the shape
            of the tensor output by this Op. In that case, at runtime, the
            value associated with this lvector must have a length equal to
            the number of dimensions promised by `self.outtype`.
            In a more general case, the number of output dimensions,
            len(self.outtype), is equal to len(shape)+self.ndim_added.
            The special case where len(shape) == 0 means that the smallest
            shape compatible with the argument's shape will be used.
        args
            The values associated with these variables will be passed to the
            RandomState function during perform as extra "*args"-style
            arguments. These should be castable to variables of Type TensorType.
        Returns
        -------
        Apply
            Apply with two outputs. The first output is a gof.generic Variable
            from which to draw further random numbers.
            The second output is the outtype() instance holding the random
            draw.
        """
        shape_ = tensor.as_tensor_variable(shape, ndim=1)
        # An empty tuple shape is normalized to an empty int64 vector.
        if shape == ():
            shape = shape_.astype('int64')
        else:
            shape = shape_
        assert shape.type.ndim == 1
        assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32')
        if not isinstance(r.type, RandomStateType):
            # NOTE(review): this is a warning only; the dead `if 0:` below
            # preserves the stricter behavior that was disabled.
            print('WARNING: RandomState instances should be in RandomStateType', file=sys.stderr)
            if 0:
                raise TypeError('r must be RandomStateType instance', r)
        # the following doesn't work because we want to ignore the
        # broadcastable flags in shape.type
        # assert shape.type == tensor.lvector
        # convert args to TensorType instances
        # and append enough None's to match the length of self.args
        args = list(map(tensor.as_tensor_variable, args))
        return gof.Apply(self,
                         [r, shape] + args,
                         [r.type(), self.outtype()])
    def infer_shape(self, node, i_shapes):
        r, shp = node.inputs[0:2]
        # if shp is a constant array of len 0, then it means 'automatic shape'
        unknown_shape = len(getattr(shp, 'data', [0, 1, 2])) == 0
        # if ndim_added == 0 and shape != () then shape
        if self.ndim_added == 0 and not unknown_shape:
            sample_shp = shp
        else:
            # if shape == () then it will depend on args
            # if ndim_added != 0 and shape != () then it will depend on args
            # Use the default infer_shape implementation.
            raise tensor.ShapeError()
        # First output (the RandomState) has no shape.
        return [None, [sample_shp[i] for i in xrange(node.outputs[1].ndim)]]
    def perform(self, node, inputs, out_):
        rout, out = out_
        # Use self.fn to draw shape worth of random numbers.
        # Numbers are drawn from r if self.inplace is True, and from a
        # copy of r if self.inplace is False
        r, shape, args = inputs[0], inputs[1], inputs[2:]
        assert type(r) == numpy.random.RandomState, (type(r), r)
        # If shape == [], that means no shape is enforced, and numpy is
        # trusted to draw the appropriate number of samples, numpy uses
        # shape "None" to represent that. Else, numpy expects a tuple.
        # TODO: compute the appropriate shape, and pass it to numpy.
        if len(shape) == 0:
            shape = None
        else:
            shape = tuple(shape)
        if (shape is not None and
                self.outtype.ndim != len(shape) + self.ndim_added):
            raise ValueError('Shape mismatch: self.outtype.ndim (%i) !='
                             ' len(shape) (%i) + self.ndim_added (%i)'
                             % (self.outtype.ndim, len(shape), self.ndim_added))
        if not self.inplace:
            # Copy so the caller's RandomState is left untouched.
            r = copy(r)
        rout[0] = r
        rval = self.exec_fn(r, *(args + [shape]))
        if (not isinstance(rval, numpy.ndarray) or
                str(rval.dtype) != node.outputs[1].type.dtype):
            # Coerce the draw to the promised output dtype.
            rval = theano._asarray(rval, dtype=node.outputs[1].type.dtype)
        # When shape is None, numpy has a tendency to unexpectedly
        # return a scalar instead of a higher-dimension array containing
        # only one element. This value should be reshaped
        if shape is None and rval.ndim == 0 and self.outtype.ndim > 0:
            rval = rval.reshape([1] * self.outtype.ndim)
        if len(rval.shape) != self.outtype.ndim:
            raise ValueError('Shape mismatch: "out" should have dimension %i,'
                             ' but the value produced by "perform" has'
                             ' dimension %i'
                             % (self.outtype.ndim, len(rval.shape)))
        # Check the output has the right shape
        if shape is not None:
            if self.ndim_added == 0 and shape != rval.shape:
                raise ValueError(
                    'Shape mismatch: "out" should have shape %s, but the'
                    ' value produced by "perform" has shape %s'
                    % (shape, rval.shape))
            elif (self.ndim_added > 0 and
                    shape != rval.shape[:-self.ndim_added]):
                raise ValueError(
                    'Shape mismatch: "out" should have shape starting with'
                    ' %s (plus %i extra dimensions), but the value produced'
                    ' by "perform" has shape %s'
                    % (shape, self.ndim_added, rval.shape))
        out[0] = rval
    def grad(self, inputs, outputs):
        # Random draws are not differentiable w.r.t. any input.
        return [theano.gradient.grad_undefined(self, k, inp,
                'No gradient defined through raw random numbers op')
                for k, inp in enumerate(inputs)]
    def R_op(self, inputs, eval_points):
        return [None for i in eval_points]
def _infer_ndim_bcast(ndim, shape, *args):
    """
    Infer the number of dimensions from the shape or the other arguments.

    Returns
    -------
    (int, variable, tuple) triple, where the variable is an integer vector,
    and the tuple contains Booleans
        The first element returned is the inferred number of dimensions.
        The second element is the shape inferred (combining symbolic and
        constant informations from shape and args).
        The third element is a broadcasting pattern corresponding to that
        shape.

    Raises
    ------
    ValueError
        If ndim disagrees with len(shape), if a negative size other than
        the -1 placeholder appears in shape, if a -1 placeholder cannot be
        resolved from args, or if args require more dimensions than ndim.
    TypeError
        If shape is neither None, a list/tuple, nor an integer vector.
    """
    # Find the minimum value of ndim required by the *args
    if args:
        args_ndim = max(arg.ndim for arg in args)
    else:
        args_ndim = 0
    if isinstance(shape, (tuple, list)):
        # there is a convention that -1 means the corresponding shape of a
        # potentially-broadcasted symbolic arg
        #
        # This case combines together symbolic and non-symbolic shape
        # information
        shape_ndim = len(shape)
        if ndim is None:
            ndim = shape_ndim
        else:
            if shape_ndim != ndim:
                raise ValueError('ndim should be equal to len(shape), but\n',
                                 'ndim = %s, len(shape) = %s, shape = %s'
                                 % (ndim, shape_ndim, shape))
        bcast = []
        pre_v_shape = []
        for i, s in enumerate(shape):
            if hasattr(s, 'type'):  # s is symbolic
                bcast.append(False)  # todo - introspect further
                pre_v_shape.append(s)
            else:
                if s >= 0:
                    pre_v_shape.append(tensor.as_tensor_variable(s))
                    bcast.append((s == 1))
                elif s == -1:
                    # Resolve the -1 placeholder against the args: use the
                    # first arg with a non-broadcastable dim at position i.
                    n_a_i = 0
                    for a in args:
                        # ndim: _ _ _ _ _ _
                        # ashp: s0 s1 s2 s3
                        # i
                        if i >= ndim - a.ndim:
                            n_a_i += 1
                            a_i = i + a.ndim - ndim
                            if not a.broadcastable[a_i]:
                                pre_v_shape.append(a.shape[a_i])
                                bcast.append(False)
                                break
                    else:
                        # No arg had a concrete dim here: either no arg
                        # overlapped position i at all (error), or all
                        # overlapping dims were broadcastable (use 1).
                        if n_a_i == 0:
                            raise ValueError((
                                'Auto-shape of -1 must overlap'
                                'with the shape of one of the broadcastable'
                                'inputs'))
                        else:
                            pre_v_shape.append(tensor.as_tensor_variable(1))
                            bcast.append(True)
                else:
                    # BUG FIX: the ValueError was previously constructed
                    # but never raised, so invalid negative sizes were
                    # silently dropped from the shape.
                    raise ValueError('negative shape', s)
        # post-condition: shape may still contain both symbolic and
        # non-symbolic things
        if len(pre_v_shape) == 0:
            v_shape = tensor.constant([], dtype='int64')
        else:
            v_shape = tensor.stack(pre_v_shape)
    elif shape is None:
        # The number of drawn samples will be determined automatically,
        # but we need to know ndim
        if not args:
            raise TypeError(('_infer_ndim_bcast cannot infer shape without'
                             ' either shape or args'))
        # Adding the args together broadcasts them against each other,
        # giving a template with the combined shape/bcast pattern.
        template = reduce(lambda a, b: a + b, args)
        v_shape = template.shape
        bcast = template.broadcastable
        ndim = template.ndim
    else:
        v_shape = tensor.as_tensor_variable(shape)
        if v_shape.ndim != 1:
            raise TypeError(
                "shape must be a vector or list of scalar, got '%s'" % v_shape)
        if ndim is None:
            ndim = tensor.get_vector_length(v_shape)
        bcast = [False] * ndim
    if v_shape.ndim != 1:
        raise TypeError("shape must be a vector or list of scalar, got '%s'" %
                        v_shape)
    if (not (v_shape.dtype.startswith('int') or
             v_shape.dtype.startswith('uint'))):
        raise TypeError('shape must be an integer vector or list',
                        v_shape.dtype)
    if args_ndim > ndim:
        raise ValueError(
            'ndim should be at least as big as required by args value',
            (ndim, args_ndim), args)
    assert ndim == len(bcast)
    return ndim, tensor.cast(v_shape, 'int64'), tuple(bcast)
def _generate_broadcasting_indices(out_shape, *shapes):
    """
    Return indices over each shape that broadcast them to match out_shape.

    The first returned list is equivalent to numpy.ndindex(out_shape); each
    other returned list holds the matching indices into the corresponding
    entry of `shapes`, with broadcasted (length-1) dimensions pinned to
    index 0, so that looping over these indices produces tensors of shape
    out_shape. The shapes should have the same length as out_shape; any
    extra right-most dimensions are ignored.
    """
    all_shapes = (out_shape,) + shapes
    # One list of index-tuples per argument, extended one dimension at
    # a time.
    ret_indices = [[()] for _ in all_shapes]
    for dim, dim_len in enumerate(out_shape):
        out_range = list(range(dim_len))
        # Pick, per shape, either the full range (matching length) or a
        # constant zero (broadcasted length-1 dimension).
        ranges = [out_range]
        for shape in shapes:
            if shape[dim] == dim_len:
                ranges.append(out_range)
            elif shape[dim] == 1:  # broadcast
                ranges.append([0] * dim_len)
            else:
                raise ValueError(
                    'shape[%i] (%i) should be equal to out_shape[%i] (%i) or'
                    ' to 1'
                    % (dim, shape[dim], dim, dim_len), shape,
                    out_shape, shapes)
        extended = [[] for _ in all_shapes]
        for prev_index in zip(*ret_indices):
            for dim_index in zip(*ranges):
                for i in range(len(all_shapes)):
                    extended[i].append(prev_index[i] + (dim_index[i],))
        ret_indices = extended
    return ret_indices
def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):
    """
    Sample from a uniform distribution between low and high.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information. If
    size is None, the output shape is inferred from the shapes of low
    and high. If dtype is not specified it is inferred from low and
    high, but is at least as precise as floatX.
    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    out_type = tensor.TensorType(dtype=dtype, broadcastable=bcast)
    return RandomFunction('uniform', out_type)(random_state, size, low, high)
def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
    """
    Sample from a normal distribution centered on avg with the
    specified standard deviation (std).

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information. If
    size is None, the output shape is inferred from the shapes of avg
    and std. If dtype is not specified it is inferred from avg and std,
    but is at least as precise as floatX.
    """
    avg = tensor.as_tensor_variable(avg)
    std = tensor.as_tensor_variable(std)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
    out_type = tensor.TensorType(dtype=dtype, broadcastable=bcast)
    return RandomFunction('normal', out_type)(random_state, size, avg, std)
def binomial(random_state, size=None, n=1, p=0.5, ndim=None,
             dtype='int64', prob=None):
    """
    Sample n times with probability of success p for each trial and
    return the number of successes.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information. If
    size is None, the output shape is inferred from the shapes of n
    and p. The `prob` keyword is a deprecated alias for `p`.
    """
    if prob is not None:
        p = prob
        print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.", file=sys.stderr)
    n = tensor.as_tensor_variable(n)
    p = tensor.as_tensor_variable(p)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, p)
    if n.dtype == 'int64':
        # Probe for a numpy bug on 32-bit machines where binomial
        # rejects int64 n; fall back to int32 when it raises.
        try:
            numpy.random.binomial(n=numpy.asarray([2, 3, 4], dtype='int64'), p=numpy.asarray([.1, .2, .3], dtype='float64'))
        except TypeError:
            # THIS WORKS AROUND A NUMPY BUG on 32bit machine
            n = tensor.cast(n, 'int32')
    out_type = tensor.TensorType(dtype=dtype, broadcastable=(False,) * ndim)
    return RandomFunction('binomial', out_type)(random_state, size, n, p)
def random_integers_helper(random_state, low, high, size):
    """
    Helper function to draw random integers.

    Generalizes numpy.random.random_integers to the case where low and
    high are tensors that broadcast against each other (and against
    size).
    """
    # Decide the output rank: from size when given, else from the bounds.
    if size is not None:
        out_ndim = len(size)
    else:
        out_ndim = max(low.ndim, high.ndim)
    # Left-pad low and high with length-1 dimensions up to out_ndim.
    if low.ndim > out_ndim:
        raise ValueError(
            'low.ndim (%i) should not be larger than len(size) (%i)'
            % (low.ndim, out_ndim),
            low, size)
    if low.ndim < out_ndim:
        low = low.reshape((1,) * (out_ndim - low.ndim) + low.shape)
    if high.ndim > out_ndim:
        raise ValueError(
            'high.ndim (%i) should not be larger than len(size) (%i)'
            % (high.ndim, out_ndim), high, size)
    if high.ndim < out_ndim:
        high = high.reshape((1,) * (out_ndim - high.ndim) + high.shape)
    # The output shape: size when given, else the broadcast of the
    # (padded) bounds.
    if size is not None:
        out_size = tuple(size)
    else:
        out_size = tuple(max(low.shape[dim], high.shape[dim])
                         for dim in range(out_ndim))
    out = numpy.ndarray(out_size)
    broadcast_ind = _generate_broadcasting_indices(out_size, low.shape,
                                                   high.shape)
    # One numpy draw per output element, bounds selected per index.
    for oi, li, hi in zip(*broadcast_ind):
        out[oi] = random_state.random_integers(low=low[li], high=high[hi])
    return out
def random_integers(random_state, size=None, low=0, high=1, ndim=None,
                    dtype='int64'):
    """
    Sample a random integer between low and high, both inclusive.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information. If
    size is None, the output shape is inferred from the shapes of low
    and high.
    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    out_type = tensor.TensorType(dtype=dtype, broadcastable=bcast)
    return RandomFunction(random_integers_helper, out_type)(
        random_state, size, low, high)
def choice_helper(random_state, a, replace, p, size):
    """
    Helper function to draw random numbers using numpy's choice function.

    Generalizes numpy.random.choice by coercing `replace` to a bool and
    translating a zero-length `p` vector into None (numpy's encoding of
    uniform probabilities).
    """
    if a.ndim > 1:
        raise ValueError('a.ndim (%i) must be 0 or 1' % a.ndim)
    if p.ndim != 1:
        raise ValueError('p.ndim (%i) must be 1' % p.ndim)
    if p.size == 0:
        p = None
    return random_state.choice(a, size, bool(replace), p)
def choice(random_state, size=None, a=2, replace=True, p=None, ndim=None,
           dtype='int64'):
    """
    Choose values from `a` with or without replacement. `a` can be a 1-D array
    or a positive scalar. If `a` is a scalar, the samples are drawn from the
    range 0,...,a-1.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.
    If size is None, a scalar will be returned.
    """
    # numpy.random.choice is only available for numpy versions >= 1.7
    major, minor, _ = numpy.version.short_version.split('.')
    if (int(major), int(minor)) < (1, 7):
        raise ImportError('choice requires at NumPy version >= 1.7 '
                          '(%s)' % numpy.__version__)
    a = tensor.as_tensor_variable(a)
    if isinstance(replace, bool):
        replace = tensor.constant(replace, dtype='int8')
    else:
        replace = tensor.as_tensor_variable(replace)
    # Encode p=None as an empty vector.
    # BUG FIX: this previously used `p or []`, which truth-tests p; that
    # raises ValueError for multi-element numpy arrays and misbehaves for
    # symbolic variables. Compare against None explicitly instead.
    p = tensor.as_tensor_variable([] if p is None else p)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size)
    op = RandomFunction(choice_helper, tensor.TensorType(dtype=dtype,
                                                         broadcastable=bcast))
    return op(random_state, size, a, replace, p)
def poisson(random_state, size=None, lam=1.0, ndim=None, dtype='int64'):
    """
    Draw samples from a Poisson distribution.

    The Poisson distribution is the limit of the Binomial distribution
    for large N.

    Parameters
    ----------
    lam : float or ndarray-like of the same shape as size parameter
        Expectation of interval, should be >= 0.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then
        m * n * k samples are drawn.
    dtype
        The dtype of the return value (which will represent counts).

    size or ndim must be given.
    """
    lam = tensor.as_tensor_variable(lam)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size)
    out_type = tensor.TensorType(dtype=dtype, broadcastable=bcast)
    return RandomFunction("poisson", out_type)(random_state, size, lam)
def permutation_helper(random_state, n, shape):
    """
    Generate batches of permutations of the integers 0..n-1.

    permutation_helper(random_state, n, (1,)) yields one permutation of
    the integers 0..n-1. In general, one permutation is drawn for every
    index in `shape`: for shape=(p, q), p*q permutations are generated
    and the output shape is (p, q, n). This generalizes
    numpy.random.permutation to tensors; to permute the elements of an
    existing vector, see shuffle_row_elements.
    """
    # n arrives as a 0-dimension array.
    assert n.shape == ()
    # Convert to a plain int: handing a long to numpy's permutation
    # crashes on Windows.
    n = int(n.item())
    if shape is None:
        # A single permutation, equivalent to shape = ().
        shape = ()
    out = numpy.empty(list(shape) + [n], int)
    for index in numpy.ndindex(*shape):
        out[index] = random_state.permutation(n)
    return out
def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):
    """
    Return permutations of the integers between 0 and n-1.

    As many permutations are drawn as required by size: for size=(p, q),
    p*q permutations are generated and the output shape is (p, q, n),
    because each permutation has length n. Theano tries to infer the
    number of dimensions from the length of size and the shape of n; the
    `ndim` parameter may be given to disambiguate.

    Notes
    -----
    The output is of dimension ndim+1.
    """
    if size is None or size == ():
        if not(ndim is None or ndim == 1):
            raise TypeError(
                "You asked for just one permutation but asked for more then 1 dimensions.")
        ndim, size, bcast = 1, (), ()
    else:
        ndim, size, bcast = _infer_ndim_bcast(ndim, size)
    # Each permutation adds a trailing, non-broadcastable length-n axis.
    out_type = tensor.TensorType(dtype=dtype,
                                 broadcastable=bcast + (False,))
    return RandomFunction(permutation_helper, out_type,
                          ndim_added=1)(random_state, size, n)
def multinomial_helper(random_state, n, pvals, size):
    """
    Helper function drawing from multinomial distributions.
    This is a generalization of numpy.random.multinomial to the case where
    n and pvals are tensors.
    """
    # Figure out the shape if it's None
    # Note: the output ndim will be ndim+1, because the multinomial
    # adds a dimension. The length of that dimension is pvals.shape[-1].
    if size is not None:
        ndim = len(size)
    else:
        ndim = max(n.ndim, pvals.ndim - 1)
    # broadcast n to ndim dimensions and pvals to ndim+1
    if n.ndim > ndim:
        raise ValueError('n.ndim (%i) should not be larger than len(size) (%i)'
                         % (n.ndim, ndim), n, size)
    if n.ndim < ndim:
        n = n.reshape((1,) * (ndim - n.ndim) + n.shape)
    if pvals.ndim - 1 > ndim:
        raise ValueError(
            'pvals.ndim-1 (%i) should not be larger than len(size) (%i)'
            % (pvals.ndim - 1, ndim),
            pvals, size)
    if pvals.ndim - 1 < ndim:
        pvals = pvals.reshape((1,) * (ndim - pvals.ndim + 1) + pvals.shape)
    # The batch shape: size when given, else the broadcast of n and
    # pvals' leading dimensions.
    if size is not None:
        size = tuple(size)
    else:
        size = ()
        for dim in xrange(ndim):
            dim_len = max(n.shape[dim], pvals.shape[dim])
            size = size + (dim_len,)
    out_size = size + (pvals.shape[-1],)
    # Build the indices over which to loop
    # Note that here, the rows (inner-most 1D subtensors) of pvals and out
    # are indexed, not their individual elements
    out = numpy.ndarray(out_size)
    broadcast_ind = _generate_broadcasting_indices(size, n.shape,
                                                   pvals.shape[:-1])
    # Iterate over these indices, drawing from one multinomial at a
    # time from numpy
    assert pvals.min() >= 0
    for mi, ni, pi in zip(*broadcast_ind):
        pvi = pvals[pi]
        # This might someday be fixed upstream
        # Currently numpy raises an exception in this method if the sum
        # of probabilities meets or exceeds 1.0.
        # In perfect arithmetic this would be correct, but in float32 or
        # float64 it is too strict.
        pisum = numpy.sum(pvi)
        if 1.0 < pisum < 1.0 + 1e-5:  # correct if we went a little over
            # because mtrand.pyx has a ValueError that will trigger if
            # sum(pvals[:-1]) > 1.0
            pvi = pvi * (1.0 - 5e-5)
            # pvi = pvi * .9
            pisum = numpy.sum(pvi)
        elif pvi[-1] < 5e-5:  # will this even work?
            pvi = pvi * (1.0 - 5e-5)
            pisum = numpy.sum(pvi)
        assert pisum <= 1.0, pisum
        out[mi] = random_state.multinomial(n=n[ni],
                                           pvals=pvi.astype('float64'))
    return out
def multinomial(random_state, size=None, n=1, pvals=[0.5, 0.5],
                ndim=None, dtype='int64'):
    """
    Sample from one or more multinomial distributions defined by
    one-dimensional slices in pvals.

    Parameters
    ----------
    pvals
        A tensor of shape "nmulti+(L,)" describing each multinomial
        distribution. This tensor must have the property that
        numpy.allclose(pvals.sum(axis=-1), 1) is true.
    size
        A vector of shape information for the output; this can also
        specify the "nmulti" part of pvals' shape. A -1 in the k'th
        position from the right means to borrow the k'th position from
        the right in nmulti. Default ``None`` means size=nmulti.
    n
        The number of experiments to simulate for each multinomial.
        This can be a scalar or a tensor; it will be broadcast to have
        shape "nmulti".
    dtype
        The dtype of the return value (which will represent counts).

    Returns
    -------
    tensor
        Tensor of len(size)+1 dimensions with shape[-1] == L, with the
        specified ``dtype``, holding the experiment counts. For the
        returned rval, "numpy.allclose(rval.sum(axis=-1), n)" is true.

    Extended Summary
    ----------------
    To simulate n experiments from each multinomial in a batch of
    size B:
        size=None, pvals.shape=(B,L) --> rval.shape=[B,L]
        (rval[i,j] counts possibility j in the i'th row of pvals.)
    Using size:
        size=(1,-1), pvals.shape=(A,B,L)
        --> rval.shape=[1,B,L], and requires that A==1.
    Using size for broadcasting of pvals:
        size=(10, 1, -1), pvals.shape=(A, B, L)
        --> rval.shape=[10,1,B,L], and requires that A==1
        (the l'th of 10 draws from each distribution pvals[k,i]).
    """
    n = tensor.as_tensor_variable(n)
    pvals = tensor.as_tensor_variable(pvals)
    # Strip pvals' trailing length-L axis before inferring ndim/size
    # (double transpose because ellipsis indexing is unavailable).
    leading_pvals = pvals.T[0].T
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, leading_pvals)
    # The added trailing axis keeps pvals' broadcastable flag.
    bcast = bcast + (pvals.type.broadcastable[-1],)
    out_type = tensor.TensorType(dtype=dtype, broadcastable=bcast)
    return RandomFunction(multinomial_helper, out_type,
                          ndim_added=1)(random_state, size, n, pvals)
@gof.local_optimizer([RandomFunction])
def random_make_inplace(node):
    """Graph rewrite: swap a non-inplace RandomFunction for an inplace one."""
    op = node.op
    if not (isinstance(op, RandomFunction) and not op.inplace):
        return False
    # Read the op's fn via _props(), not op.fn directly, since op.fn
    # may not be picklable.
    fn, outtype, _, ndim_added = op._props()
    inplace_op = RandomFunction(fn, outtype, inplace=True,
                                ndim_added=ndim_added)
    return inplace_op.make_node(*node.inputs).outputs
# Register the inplace substitution at position 99 with the 'fast_run'
# and 'inplace' tags, wrapped as an in2out pass over the whole graph.
optdb.register('random_make_inplace', opt.in2out(random_make_inplace,
                                                 ignore_newtrees=True),
               99, 'fast_run', 'inplace')
class RandomStreamsBase(object):
def binomial(self, size=None, n=1, p=0.5, ndim=None, dtype='int64',
prob=None):
"""
Sample n times with probability of success p for each trial and
return the number of successes.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
"""
if prob is not None:
p = prob
print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.", file=sys.stderr)
return self.gen(binomial, size, n, p, ndim=ndim, dtype=dtype)
def uniform(self, size=None, low=0.0, high=1.0, ndim=None, dtype=None):
"""
Sample a tensor of given size whose element from a uniform
distribution between low and high.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
"""
return self.gen(uniform, size, low, high, ndim=ndim, dtype=dtype)
def normal(self, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
"""
Sample from a normal distribution centered on avg with
the specified standard deviation (std).
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
"""
return self.gen(normal, size, avg, std, ndim=ndim, dtype=dtype)
def random_integers(self, size=None, low=0, high=1, ndim=None,
dtype='int64'):
"""
Sample a random integer between low and high, both inclusive.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
"""
return self.gen(random_integers, size, low, high, ndim=ndim,
dtype=dtype)
def choice(self, size=None, a=2, replace=True, p=None, ndim=None,
dtype='int64'):
"""
Choose values from `a` with or without replacement.
`a` can be a 1-D array or a positive scalar.
If `a` is a scalar, the samples are drawn from the range 0,...,a-1.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
"""
return self.gen(choice, size, a, replace, p, ndim=ndim, dtype=dtype)
def poisson(self, size=None, lam=None, ndim=None, dtype='int64'):
"""
Draw samples from a Poisson distribution.
The Poisson distribution is the limit of the Binomial distribution for
large N.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
"""
return self.gen(poisson, size, lam, ndim=ndim, dtype=dtype)
def permutation(self, size=None, n=1, ndim=None, dtype='int64'):
"""
Return permutations of the integers between 0 and n-1.
Returns them as many times as required by size. For instance,
if size=(p,q), p*q permutations will be generated,
and the output shape will be (p,q,n), because each
permutation is of size n.
Theano tries to infer the number of dimensions from the length
of the size argument and the shape of n, but you may always
specify it with the `ndim` parameter.
Notes
-----
Note that the output will then be of dimension ndim+1.
"""
return self.gen(permutation, size, n, ndim=ndim, dtype=dtype)
def multinomial(self, size=None, n=1, pvals=[0.5, 0.5], ndim=None,
dtype='int64'):
"""
Sample n times from a multinomial distribution defined by
probabilities pvals, as many times as required by size. For
instance, if size=(p,q), p*q samples will be drawn, and the
output shape will be (p,q,len(pvals)).
Theano tries to infer the number of dimensions from the length
of the size argument and the shapes of n and pvals, but you may
always specify it with the `ndim` parameter.
Notes
-----
Note that the output will then be of dimension ndim+1.
"""
return self.gen(multinomial, size, n, pvals, ndim=ndim, dtype=dtype)
def shuffle_row_elements(self, input):
    """
    Return a variable with every row (rightmost index) shuffled.

    This uses permutation random variable internally, available via
    the ``.permutation`` attribute of the return value.
    """
    # One permutation of length input.shape[-1] for each leading "row".
    perm = self.permutation(size=input.shape[:-1], n=input.shape[-1],
                            ndim=input.ndim - 1)
    shuffled = tensor.permute_row_elements(input, perm)
    # Expose the permutation that was used, as documented above.
    shuffled.permutation = perm
    return shuffled
|
|
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import sys
import jpype
import common
from jpype.types import *
from jpype import java
def passThrough(item):
    """Round-trip *item* through a java.util.ArrayList and return it."""
    holder = JClass("java.util.ArrayList")()
    holder.add(item)
    return holder.get(0)
class BoxedTestCase(common.JPypeTestCase):
    """Tests for conversions between Python values, Java primitives, and
    the java.lang boxed wrapper types (Boolean, Byte, Character, Short,
    Integer, Long, Float, Double).
    """
    __name__ = "BoxedTestCase"

    def setUp(self):
        common.JPypeTestCase.setUp(self)
        self.TestBoxed = jpype.JClass('jpype.boxed.Boxed')
        self.Number = jpype.JClass('java.lang.Number')
        self.Comparable = jpype.JClass('java.lang.Comparable')

    def testShort(self):
        c1 = 12345
        # Check passed from and passed to
        d1 = self.TestBoxed.newShort(c1)
        d2 = java.lang.Short(c1)
        self.assertEqual(d1, c1)
        self.assertEqual(d2, c1)
        self.assertEqual(c1, d1)
        self.assertEqual(c1, d2)
        self.assertEqual(d1, d2)
        self.assertEqual(self.TestBoxed.callShort(c1),
                         self.TestBoxed.callShort(d2))
        # Verify ops
        self.assertEqual(d1 + 2, d1 + 2)
        self.assertEqual(d1 * 2, d1 * 2)

    def testInteger(self):
        c1 = 12345
        # Check passed from and passed to
        d1 = self.TestBoxed.newInteger(c1)
        d2 = java.lang.Integer(c1)
        self.assertEqual(d1, c1)
        self.assertEqual(d2, c1)
        self.assertEqual(c1, d1)
        self.assertEqual(c1, d2)
        self.assertEqual(d1, d2)
        self.assertEqual(self.TestBoxed.callInteger(c1),
                         self.TestBoxed.callInteger(d2))
        # Verify ops
        self.assertEqual(d1 + 2, d1 + 2)
        self.assertEqual(d1 * 2, d1 * 2)

    def testLong(self):
        c1 = 12345
        # Check passed from and passed to
        d1 = self.TestBoxed.newLong(c1)
        d2 = java.lang.Long(c1)
        self.assertEqual(d1, c1)
        self.assertEqual(d2, c1)
        self.assertEqual(c1, d1)
        self.assertEqual(c1, d2)
        self.assertEqual(d1, d2)
        self.assertEqual(self.TestBoxed.callLong(c1),
                         self.TestBoxed.callLong(d2))
        # Verify ops
        self.assertEqual(d1 + 2, d1 + 2)
        self.assertEqual(d1 * 2, d1 * 2)

    def testDoubleFromFloat(self):
        java.lang.Double(1.0)

    def testFloatFromInt(self):
        java.lang.Float(1)

    def testDoubleFromInt(self):
        java.lang.Double(1)

    def testBoxed2(self):
        # Widening conversions between boxed types must be accepted.
        java.lang.Short(java.lang.Integer(1))
        java.lang.Integer(java.lang.Integer(1))
        java.lang.Long(java.lang.Integer(1))
        java.lang.Float(java.lang.Integer(1))
        java.lang.Float(java.lang.Long(1))
        java.lang.Double(java.lang.Integer(1))
        java.lang.Double(java.lang.Long(1))
        java.lang.Double(java.lang.Float(1))

    def testFloat(self):
        c1 = 123124 / 256.0
        # Check passed from and passed to
        d1 = self.TestBoxed.newFloat(c1)
        d2 = java.lang.Float(c1)
        self.assertEqual(d1, c1)
        self.assertEqual(d2, c1)
        self.assertEqual(c1, d1)
        self.assertEqual(c1, d2)
        self.assertEqual(d1, d2)
        self.assertEqual(self.TestBoxed.callFloat(c1),
                         self.TestBoxed.callFloat(d2))
        # Verify ops
        self.assertEqual(d1 + 2, d1 + 2)
        self.assertEqual(d1 * 2, d1 * 2)
        self.assertTrue(d2 < c1 + 1)
        self.assertTrue(d2 > c1 - 1)

    def testDouble(self):
        c1 = 123124 / 256.0
        # Check passed from and passed to
        d1 = self.TestBoxed.newDouble(c1)
        d2 = java.lang.Double(c1)
        self.assertEqual(d1, c1)
        self.assertEqual(d2, c1)
        self.assertEqual(c1, d1)
        self.assertEqual(c1, d2)
        self.assertEqual(d1, d2)
        self.assertEqual(self.TestBoxed.callDouble(c1),
                         self.TestBoxed.callDouble(d2))
        # Verify ops
        self.assertEqual(d1 + 2, d1 + 2)
        self.assertEqual(d1 * 2, d1 * 2)
        self.assertTrue(d2 < c1 + 1)
        self.assertTrue(d2 > c1 - 1)

    # The which* methods report which overload (primitive vs boxed)
    # Java resolved: 1 for the primitive overload, 2 for the boxed one.
    def testShortResolve(self):
        self.assertEqual(self.TestBoxed.whichShort(1), 1)
        self.assertEqual(self.TestBoxed.whichShort(java.lang.Short(1)), 2)

    def testIntegerResolve(self):
        self.assertEqual(self.TestBoxed.whichInteger(1), 1)
        self.assertEqual(self.TestBoxed.whichInteger(java.lang.Integer(1)), 2)

    def testLongResolve(self):
        self.assertEqual(self.TestBoxed.whichLong(1), 1)
        self.assertEqual(self.TestBoxed.whichLong(java.lang.Long(1)), 2)

    def testFloatResolve(self):
        self.assertEqual(self.TestBoxed.whichFloat(1.0), 1)
        self.assertEqual(self.TestBoxed.whichFloat(java.lang.Float(1.0)), 2)

    def testDoubleResolve(self):
        self.assertEqual(self.TestBoxed.whichDouble(1.0), 1)
        self.assertEqual(self.TestBoxed.whichDouble(java.lang.Double(1.0)), 2)

    # Renamed from the original's misspelled "testPrivitiveToBoxed".
    def testPrimitiveToBoxed(self):
        java.lang.Boolean(JBoolean(0))
        java.lang.Byte(JByte(0))
        java.lang.Short(JShort(0))
        java.lang.Integer(JInt(0))
        java.lang.Long(JLong(0))
        java.lang.Float(JFloat(0))
        java.lang.Double(JDouble(0))

    def testBooleanBad(self):
        # java.lang.Boolean(X) works like bool(X)
        # Explicit is a cast
        self.assertFalse(java.lang.Boolean(tuple()))
        self.assertFalse(java.lang.Boolean(list()))
        self.assertFalse(java.lang.Boolean(dict()))
        self.assertFalse(java.lang.Boolean(set()))
        self.assertTrue(java.lang.Boolean(tuple(['a'])))
        self.assertTrue(java.lang.Boolean(['a']))
        self.assertTrue(java.lang.Boolean({'a': 1}))
        self.assertTrue(java.lang.Boolean(set(['a', 'b'])))
        # Implicit does not automatically cast
        fixture = JClass('jpype.common.Fixture')()
        with self.assertRaises(TypeError):
            fixture.callBoxedBoolean(tuple())
        with self.assertRaises(TypeError):
            fixture.callBoxedBoolean(list())
        with self.assertRaises(TypeError):
            fixture.callBoxedBoolean(dict())
        with self.assertRaises(TypeError):
            fixture.callBoxedBoolean(set())

    def testByteBad(self):
        with self.assertRaises(TypeError):
            java.lang.Byte(tuple())

    def testCharacterBad(self):
        with self.assertRaises(TypeError):
            java.lang.Character(tuple())

    def testShortBad(self):
        with self.assertRaises(TypeError):
            java.lang.Short(tuple())

    def testIntegerBad(self):
        with self.assertRaises(TypeError):
            java.lang.Integer(tuple())

    def testLongBad(self):
        with self.assertRaises(TypeError):
            java.lang.Long(tuple())

    def testFloatBad(self):
        with self.assertRaises(TypeError):
            java.lang.Float(tuple())

    def testDoubleBad(self):
        with self.assertRaises(TypeError):
            java.lang.Double(tuple())

    def testBooleanBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Boolean(tuple(), tuple())

    def testByteBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Byte(tuple(), tuple())

    def testCharacterBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Character(tuple(), tuple())

    def testShortBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Short(tuple(), tuple())

    def testIntegerBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Integer(tuple(), tuple())

    def testLongBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Long(tuple(), tuple())

    def testFloatBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Float(tuple(), tuple())

    def testDoubleBad2(self):
        with self.assertRaises(TypeError):
            java.lang.Double(tuple(), tuple())

    def compareTest(self, u, v):
        # Helper: check the full set of comparison operators for a boxed
        # value `u` that should equal the plain value `v`.
        self.assertEqual(u, v)
        self.assertNotEqual(u, v - 1)
        self.assertTrue(u > v - 1)
        self.assertFalse(u > v + 1)
        self.assertTrue(u >= v)
        self.assertTrue(u <= v)
        self.assertFalse(u < v)
        self.assertFalse(u > v)
        self.assertTrue(u < v + 1)
        self.assertTrue(u > v - 1)

    def testByteBoxOps(self):
        u = JObject(81, JByte)
        self.assertIsInstance(u, jpype.java.lang.Byte)
        self.compareTest(u, 81)

    def testCharBoxOps(self):
        u = JObject('Q', JChar)
        self.assertIsInstance(u, jpype.java.lang.Character)
        self.compareTest(u, 81)

    def testShortBoxOps(self):
        u = JObject(81, JShort)
        self.assertIsInstance(u, jpype.java.lang.Short)
        self.compareTest(u, 81)

    def testIntBoxOps(self):
        u = JObject(81, JInt)
        self.assertIsInstance(u, jpype.java.lang.Integer)
        self.compareTest(u, 81)

    def testLongBoxOps(self):
        u = JObject(81, JLong)
        self.assertIsInstance(u, jpype.java.lang.Long)
        self.compareTest(u, 81)

    # Renamed from a duplicate "testIntBoxOps" definition that shadowed the
    # JInt test above and prevented it from ever running.
    def testFloatBoxOps(self):
        u = JObject(81, JFloat)
        self.assertIsInstance(u, jpype.java.lang.Float)
        self.compareTest(u, 81)

    # Renamed from a duplicate "testLongBoxOps" definition that shadowed the
    # JLong test above and prevented it from ever running.
    def testDoubleBoxOps(self):
        u = JObject(81, JDouble)
        self.assertIsInstance(u, jpype.java.lang.Double)
        self.compareTest(u, 81)

    def testCharBox(self):
        u = passThrough(JChar('Q'))
        self.assertIsInstance(u, jpype.java.lang.Character)
        self.assertEqual(u, jpype.java.lang.Character('Q'))

    def testBooleanBox(self):
        u = passThrough(JBoolean(True))
        self.assertIsInstance(u, jpype.java.lang.Boolean)
        self.assertEqual(u, jpype.java.lang.Boolean(True))
        self.assertEqual(u, True)
        u = passThrough(JBoolean(False))
        self.assertIsInstance(u, jpype.java.lang.Boolean)
        self.assertEqual(u, jpype.java.lang.Boolean(False))
        self.assertEqual(u, False)

    def testByteBox(self):
        u = passThrough(JByte(5))
        self.assertIsInstance(u, java.lang.Byte)
        self.assertEqual(u, java.lang.Byte(5))

    def testShortBox(self):
        u = passThrough(JShort(5))
        self.assertIsInstance(u, java.lang.Short)
        self.assertEqual(u, java.lang.Short(5))

    def testIntBox(self):
        u = passThrough(JInt(5))
        self.assertIsInstance(u, java.lang.Integer)
        self.assertEqual(u, java.lang.Integer(5))

    def testLongBox(self):
        u = passThrough(JLong(5))
        self.assertIsInstance(u, java.lang.Long)
        self.assertEqual(u, java.lang.Long(5))

    def testFloatBox(self):
        u = passThrough(JFloat(5))
        self.assertIsInstance(u, java.lang.Float)
        self.assertEqual(u, java.lang.Float(5))

    def testDoubleBox(self):
        u = passThrough(JDouble(5))
        self.assertIsInstance(u, java.lang.Double)
        self.assertEqual(u, java.lang.Double(5))

    # The *Null tests verify that a null boxed value behaves like None:
    # it is not equal to any number, cannot convert to int/float, and
    # stringifies/hashes like None, including after a Java round trip.
    def testBooleanNull(self):
        n = JObject(None, JBoolean)
        self.assertIsInstance(n, java.lang.Boolean)
        self.assertEqual(n, None)
        self.assertNotEqual(n, True)
        self.assertNotEqual(n, False)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testCharNull(self):
        n = JObject(None, JChar)
        self.assertIsInstance(n, java.lang.Character)
        self.assertNotEqual(n, 0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testByteNull(self):
        n = JObject(None, JByte)
        self.assertIsInstance(n, java.lang.Byte)
        self.assertNotEqual(n, 0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testShortNull(self):
        n = JObject(None, JShort)
        self.assertIsInstance(n, java.lang.Short)
        self.assertNotEqual(n, 0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testIntNull(self):
        n = JObject(None, JInt)
        self.assertIsInstance(n, java.lang.Integer)
        self.assertNotEqual(n, 0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testLongNull(self):
        n = JObject(None, JLong)
        self.assertIsInstance(n, java.lang.Long)
        self.assertNotEqual(n, 0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testFloatNull(self):
        n = JObject(None, JFloat)
        self.assertIsInstance(n, java.lang.Float)
        self.assertNotEqual(n, 0)
        self.assertNotEqual(n, 0.0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testDoubleNull(self):
        n = JObject(None, JDouble)
        self.assertIsInstance(n, java.lang.Double)
        self.assertNotEqual(n, 0)
        self.assertNotEqual(n, 0.0)
        with self.assertRaises(TypeError):
            int(n)
        with self.assertRaises(TypeError):
            float(n)
        self.assertEqual(str(n), str(None))
        self.assertEqual(repr(n), str(None))
        self.assertEqual(hash(n), hash(None))
        u = passThrough(n)
        self.assertEqual(u, None)

    def testAsNumber(self):
        # All numeric boxed types are java.lang.Number instances.
        self.assertIsInstance(java.lang.Byte(1), java.lang.Number)
        self.assertIsInstance(java.lang.Short(1), java.lang.Number)
        self.assertIsInstance(java.lang.Integer(1), java.lang.Number)
        self.assertIsInstance(java.lang.Long(1), java.lang.Number)
        self.assertIsInstance(java.lang.Float(1), java.lang.Number)
        self.assertIsInstance(java.lang.Double(1), java.lang.Number)
|
|
import json
from collections import OrderedDict
from datetime import date, datetime, timedelta
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Q, Sum
from django.template import loader
from django.utils.translation import ugettext, ugettext_lazy as _
from django_jsonfield_backport.models import JSONField
import olympia.core.logger
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.access import acl
from olympia.addons.models import Addon, AddonApprovalsCounter
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ModelBase
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import cache_ns_key, send_mail
from olympia.constants.promoted import (
NOT_PROMOTED, PROMOTED_GROUPS_BY_ID, RECOMMENDED, PRE_REVIEW_GROUPS)
from olympia.files.models import FileValidation
from olympia.ratings.models import Rating
from olympia.reviewers.sql_model import RawSQLModel
from olympia.users.models import UserProfile
from olympia.versions.models import Version, version_uploaded
# Module-level loggers for user-facing and reviewer-facing events.
user_log = olympia.core.logger.getLogger('z.users')
log = olympia.core.logger.getLogger('z.reviewers')
# (property_name, css_class, human-readable title) triples describing the
# flags shown next to queue entries; the property is looked up on the
# version/addon (get_flags()) or on the raw SQL row (get_flags_for_row()).
VIEW_QUEUE_FLAGS = (
    ('needs_admin_code_review', 'needs-admin-code-review',
     _('Needs Admin Code Review')),
    ('needs_admin_content_review', 'needs-admin-content-review',
     _('Needs Admin Content Review')),
    ('needs_admin_theme_review', 'needs-admin-theme-review',
     _('Needs Admin Static Theme Review')),
    ('is_restart_required', 'is_restart_required', _('Requires Restart')),
    ('sources_provided', 'sources-provided', _('Sources provided')),
    ('is_webextension', 'webextension', _('WebExtension')),
    ('auto_approval_delayed_temporarily', 'auto-approval-delayed-temporarily',
     _('Auto-approval delayed temporarily')),
    ('auto_approval_delayed_indefinitely',
     'auto-approval-delayed-indefinitely',
     _('Auto-approval delayed indefinitely')),
)
def get_reviewing_cache_key(addon_id):
    """Return the cache key tracking who is currently viewing this add-on."""
    return f'review_viewing:{addon_id}'
def clear_reviewing_cache(addon_id):
    """Drop the "currently being reviewed" marker for this add-on."""
    key = get_reviewing_cache_key(addon_id)
    return cache.delete(key)
def get_reviewing_cache(addon_id):
    """Return the id of the user currently viewing this add-on, if any."""
    key = get_reviewing_cache_key(addon_id)
    return cache.get(key)
def set_reviewing_cache(addon_id, user_id):
    """Record that `user_id` is currently reviewing `addon_id`.

    We want to save it for twice as long as the ping interval,
    just to account for latency and the like.
    """
    timeout = amo.REVIEWER_VIEWING_INTERVAL * 2
    cache.set(get_reviewing_cache_key(addon_id), user_id, timeout)
class CannedResponse(ModelBase):
    """A pre-written reviewer response, typed and grouped for display."""
    id = PositiveAutoField(primary_key=True)
    name = models.CharField(max_length=255)
    response = models.TextField()
    sort_group = models.CharField(max_length=255)
    type = models.PositiveIntegerField(
        choices=amo.CANNED_RESPONSE_TYPE_CHOICES.items(), db_index=True,
        default=0)
    # Category is used only by code-manager
    category = models.PositiveIntegerField(
        choices=amo.CANNED_RESPONSE_CATEGORY_CHOICES.items(),
        default=amo.CANNED_RESPONSE_CATEGORY_OTHER)

    class Meta:
        db_table = 'cannedresponses'

    def __str__(self):
        return str(self.name)
def get_flags(addon, version):
    """Return the (css_class, title) flag tuples that should be displayed
    for a particular add-on/version."""
    flags = []
    for prop, css_class, title in VIEW_QUEUE_FLAGS:
        # Prefer the version's attribute; fall back to the add-on's.
        if getattr(version, prop, getattr(addon, prop, None)):
            flags.append((css_class, title))
    # add in the promoted group flag and return
    promoted = addon.promoted_group(currently_approved=False)
    if promoted:
        flags.append((f'promoted-{promoted.api_name}', promoted.name))
    return flags
def get_flags_for_row(record):
    """Like get_flags(), but for the queue pages, using fields directly
    returned by the queues SQL query."""
    flags = [
        (css_class, title)
        for prop, css_class, title in VIEW_QUEUE_FLAGS
        if getattr(record, prop)
    ]
    # add in the promoted group flag and return
    promoted = record.promoted
    if promoted:
        flags.append((f'promoted-{promoted.api_name}', promoted.name))
    return flags
class ViewQueue(RawSQLModel):
    """Raw-SQL model listing add-ons that have a listed version awaiting
    review.

    The mixins below refine `base_query()`'s where clauses to build the
    individual reviewer queues (full review, pending, themes, ...).
    """
    id = models.IntegerField()
    addon_name = models.CharField(max_length=255)
    addon_slug = models.CharField(max_length=30)
    addon_status = models.IntegerField()
    addon_type_id = models.IntegerField()
    auto_approval_delayed_temporarily = models.NullBooleanField()
    auto_approval_delayed_indefinitely = models.NullBooleanField()
    is_restart_required = models.BooleanField()
    is_webextension = models.BooleanField()
    latest_version = models.CharField(max_length=255)
    needs_admin_code_review = models.NullBooleanField()
    needs_admin_content_review = models.NullBooleanField()
    needs_admin_theme_review = models.NullBooleanField()
    promoted_group_id = models.IntegerField()
    source = models.CharField(max_length=100)
    waiting_time_days = models.IntegerField()
    waiting_time_hours = models.IntegerField()
    waiting_time_min = models.IntegerField()
    # When True the queue contains only RECOMMENDED add-ons; when False
    # they are excluded (see the promoted.group_id where clause below).
    recommendable_addons = False

    def base_query(self):
        """Return the select/from/where/group_by parts of the raw query."""
        return {
            'select': OrderedDict([
                ('id', 'addons.id'),
                ('addon_name', 'tr.localized_string'),
                ('addon_status', 'addons.status'),
                ('addon_type_id', 'addons.addontype_id'),
                ('addon_slug', 'addons.slug'),
                # A delay in the future with year 9999 means "indefinite";
                # any other future delay is "temporary".
                ('auto_approval_delayed_temporarily', (
                    'TIMEDIFF(addons_addonreviewerflags.'
                    'auto_approval_delayed_until, NOW()) > 0 AND '
                    'EXTRACT(YEAR FROM addons_addonreviewerflags.'
                    'auto_approval_delayed_until) != 9999')),
                ('auto_approval_delayed_indefinitely', (
                    'TIMEDIFF(addons_addonreviewerflags.'
                    'auto_approval_delayed_until, NOW()) > 0 AND '
                    'EXTRACT(YEAR FROM addons_addonreviewerflags.'
                    'auto_approval_delayed_until) = 9999')),
                ('is_restart_required', 'MAX(files.is_restart_required)'),
                ('is_webextension', 'MAX(files.is_webextension)'),
                ('latest_version', 'versions.version'),
                ('needs_admin_code_review',
                    'addons_addonreviewerflags.needs_admin_code_review'),
                ('needs_admin_content_review',
                    'addons_addonreviewerflags.needs_admin_content_review'),
                ('needs_admin_theme_review',
                    'addons_addonreviewerflags.needs_admin_theme_review'),
                ('promoted_group_id', 'promoted.group_id'),
                ('source', 'versions.source'),
                # Time since the most recent nomination, at 3 granularities.
                ('waiting_time_days',
                    'TIMESTAMPDIFF(DAY, MAX(versions.nomination), NOW())'),
                ('waiting_time_hours',
                    'TIMESTAMPDIFF(HOUR, MAX(versions.nomination), NOW())'),
                ('waiting_time_min',
                    'TIMESTAMPDIFF(MINUTE, MAX(versions.nomination), NOW())'),
            ]),
            'from': [
                'addons',
                """
                LEFT JOIN addons_addonreviewerflags ON (
                    addons.id = addons_addonreviewerflags.addon_id)
                LEFT JOIN versions ON (addons.id = versions.addon_id)
                LEFT JOIN versions_versionreviewerflags ON (
                    versions.id = versions_versionreviewerflags.version_id)
                LEFT JOIN files ON (files.version_id = versions.id)
                LEFT JOIN promoted_promotedaddon AS promoted ON (
                    addons.id = promoted.addon_id)
                JOIN translations AS tr ON (
                    tr.id = addons.name
                    AND tr.locale = addons.defaultlocale)
                """
            ],
            'where': [
                'NOT addons.inactive',  # disabled_by_user
                'versions.channel = %s' % amo.RELEASE_CHANNEL_LISTED,
                'files.status = %s' % amo.STATUS_AWAITING_REVIEW,
                'versions_versionreviewerflags.pending_rejection IS NULL',
                # Include or exclude RECOMMENDED add-ons depending on
                # the recommendable_addons class attribute.
                ('NOT ' if not self.recommendable_addons else '') +
                '(promoted.group_id = %s AND promoted.group_id IS NOT NULL)' %
                RECOMMENDED.id,
            ],
            'group_by': 'id'}

    @property
    def sources_provided(self):
        # True when the version row carried a source archive path.
        return bool(self.source)

    @property
    def promoted(self):
        return PROMOTED_GROUPS_BY_ID.get(self.promoted_group_id, NOT_PROMOTED)

    @property
    def flags(self):
        return get_flags_for_row(self)
def _int_join(list_of_ints):
return ','.join(str(int(int_)) for int_ in list_of_ints)
class FullReviewQueueMixin:
    """Restrict the queue to add-ons awaiting their first full review."""

    def base_query(self):
        query = super().base_query()
        clause = 'addons.status = %s' % amo.STATUS_NOMINATED
        query['where'].append(clause)
        return query
class PendingQueueMixin:
    """Restrict the queue to already-approved add-ons with a pending
    update."""

    def base_query(self):
        query = super().base_query()
        clause = 'addons.status = %s' % amo.STATUS_APPROVED
        query['where'].append(clause)
        return query
class CombinedReviewQueueMixin:
    """Restrict the queue to add-ons in any valid status."""

    def base_query(self):
        query = super().base_query()
        statuses = _int_join(amo.VALID_ADDON_STATUSES)
        query['where'].append(f'addons.status IN ({statuses})')
        return query
class ExtensionQueueMixin:
    """Restrict the queue to extensions that need a human review."""

    def base_query(self):
        query = super().base_query()
        # All add-on types handled in this queue, except search plugins.
        types = _int_join(
            set(amo.GROUP_TYPE_ADDON) - {amo.ADDON_SEARCH})
        flags_table = 'addons_addonreviewerflags'
        # Promoted groups whose versions must be reviewed before release.
        promoted_groups = _int_join(
            group.id for group in PRE_REVIEW_GROUPS)
        # A version belongs in this queue when it is a legacy (non
        # webextension) file, when auto-approval is disabled or currently
        # delayed, or when the add-on is in a pre-review promoted group.
        query['where'].append(
            f'((addons.addontype_id IN ({types}) '
            'AND files.is_webextension = 0) '
            f'OR {flags_table}.auto_approval_disabled = 1 '
            f'OR {flags_table}.auto_approval_disabled_until_next_approval = 1 '
            f'OR {flags_table}.auto_approval_delayed_until > NOW() '
            f'OR promoted.group_id IN ({promoted_groups})'
            ')'
        )
        return query
class ThemeQueueMixin:
    """Restrict the queue to static themes."""

    def base_query(self):
        query = super().base_query()
        clause = 'addons.addontype_id = %s' % amo.ADDON_STATICTHEME
        query['where'].append(clause)
        return query
class ViewExtensionQueue(ExtensionQueueMixin, CombinedReviewQueueMixin,
                         ViewQueue):
    """Queue of extensions requiring human review, across valid statuses."""
    pass
class ViewRecommendedQueue(CombinedReviewQueueMixin, ViewQueue):
    """Queue limited to add-ons in the RECOMMENDED promoted group."""
    recommendable_addons = True
class ViewThemeFullReviewQueue(ThemeQueueMixin, FullReviewQueueMixin,
                               ViewQueue):
    """Queue of static themes awaiting their first (full) review."""
    pass
class ViewThemePendingQueue(ThemeQueueMixin, PendingQueueMixin, ViewQueue):
    """Queue of approved static themes with a pending update."""
    pass
class ViewUnlistedAllList(RawSQLModel):
    """Raw-SQL model listing every non-admin-disabled add-on that has
    unlisted versions, including deleted ones."""
    id = models.IntegerField()
    addon_name = models.CharField(max_length=255)
    addon_slug = models.CharField(max_length=30)
    guid = models.CharField(max_length=255)
    # GROUP_CONCAT'ed author ids/usernames; decoded by `authors` below.
    _author_ids = models.CharField(max_length=255)
    _author_usernames = models.CharField()
    addon_status = models.IntegerField()
    needs_admin_code_review = models.NullBooleanField()
    needs_admin_content_review = models.NullBooleanField()
    needs_admin_theme_review = models.NullBooleanField()
    is_deleted = models.BooleanField()

    def base_query(self):
        """Return the select/from/where/group_by parts of the raw query."""
        return {
            'select': OrderedDict([
                ('id', 'addons.id'),
                ('addon_name', 'tr.localized_string'),
                ('addon_status', 'addons.status'),
                ('addon_slug', 'addons.slug'),
                ('guid', 'addons.guid'),
                ('_author_ids', 'GROUP_CONCAT(authors.user_id)'),
                ('_author_usernames', 'GROUP_CONCAT(users.username)'),
                ('needs_admin_code_review',
                    'addons_addonreviewerflags.needs_admin_code_review'),
                ('needs_admin_content_review',
                    'addons_addonreviewerflags.needs_admin_content_review'),
                ('needs_admin_theme_review',
                    'addons_addonreviewerflags.needs_admin_theme_review'),
                # Status 11 marks deleted add-ons (hence the alias).
                ('is_deleted', 'IF (addons.status=11, true, false)'),
            ]),
            'from': [
                'addons',
                """
                LEFT JOIN addons_addonreviewerflags ON (
                    addons.id = addons_addonreviewerflags.addon_id)
                LEFT JOIN versions
                    ON (versions.addon_id = addons.id)
                JOIN translations AS tr ON (
                    tr.id = addons.name AND
                    tr.locale = addons.defaultlocale)
                LEFT JOIN addons_users AS authors
                    ON addons.id = authors.addon_id
                LEFT JOIN users as users ON users.id = authors.user_id
                """
            ],
            'where': [
                'NOT addons.inactive',  # disabled_by_user
                'versions.channel = %s' % amo.RELEASE_CHANNEL_UNLISTED,
                'addons.status <> %s' % amo.STATUS_DISABLED
            ],
            'group_by': 'id'}

    @property
    def authors(self):
        """Return unique (id, username) pairs for the add-on's authors."""
        ids = self._explode_concat(self._author_ids)
        usernames = self._explode_concat(self._author_usernames, cast=str)
        return list(set(zip(ids, usernames)))
class PerformanceGraph(RawSQLModel):
    """Raw-SQL model counting review actions per reviewer per month,
    sourced from the log_activity table."""
    id = models.IntegerField()
    # 'YYYY-MM' bucket derived from the activity's created timestamp.
    yearmonth = models.CharField(max_length=7)
    approval_created = models.DateTimeField()
    user_id = models.IntegerField()
    total = models.IntegerField()

    def base_query(self):
        """Return the select/from/where/group_by parts of the raw query."""
        # Count every reviewer review action except "request version".
        request_ver = amo.LOG.REQUEST_VERSION.id
        review_ids = [str(r) for r in amo.LOG_REVIEWER_REVIEW_ACTION
                      if r != request_ver]
        return {
            'select': OrderedDict([
                # %% escapes the format specifiers for the SQL driver.
                ('yearmonth',
                 "DATE_FORMAT(`log_activity`.`created`, '%%Y-%%m')"),
                ('approval_created', '`log_activity`.`created`'),
                ('user_id', '`log_activity`.`user_id`'),
                ('total', 'COUNT(*)')
            ]),
            'from': [
                'log_activity',
            ],
            'where': [
                'log_activity.action in (%s)' % ','.join(review_ids),
                'user_id <> %s' % settings.TASK_USER_ID  # No auto-approvals.
            ],
            'group_by': 'yearmonth, user_id'
        }
class ReviewerSubscription(ModelBase):
    """Subscription of a reviewer to new uploads of an add-on, per channel.

    Used by send_notifications() (connected to the version_uploaded
    signal) to email subscribers about new versions.
    """
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
    channel = models.PositiveSmallIntegerField(
        choices=amo.RELEASE_CHANNEL_CHOICES)

    class Meta:
        db_table = 'editor_subscriptions'

    def send_notification(self, version):
        """Email this subscriber about `version` having been uploaded."""
        user_log.info('Sending addon update notice to %s for %s' %
                      (self.user.email, self.addon.pk))
        if version.channel == amo.RELEASE_CHANNEL_LISTED:
            listing_url = absolutify(reverse('addons.detail',
                                             args=[self.addon.pk],
                                             add_prefix=False))
        else:
            # If the submission went to the unlisted channel,
            # do not link to the listing.
            listing_url = None
        context = {
            'name': self.addon.name,
            'url': listing_url,
            'number': version.version,
            'review': absolutify(reverse(
                'reviewers.review',
                kwargs={'addon_id': self.addon.pk,
                        'channel': amo.CHANNEL_CHOICES_API[version.channel]},
                add_prefix=False)),
            'SITE_URL': settings.SITE_URL,
        }
        # Not being localised because we don't know the reviewer's locale.
        subject = 'Mozilla Add-ons: %s Updated' % self.addon.name
        template = loader.get_template('reviewers/emails/notify_update.ltxt')
        send_mail(subject, template.render(context),
                  recipient_list=[self.user.email],
                  from_email=settings.ADDONS_EMAIL,
                  use_deny_list=False)
def send_notifications(sender=None, instance=None, signal=None, **kw):
    """Notify subscribed reviewers about a newly uploaded version.

    `instance` is the uploaded Version; connected to version_uploaded
    below. A subscriber is notified only when active, subscribed to the
    version's channel, and holding a matching review permission.
    """
    subscribers = instance.addon.reviewersubscription_set.all()
    if not subscribers:
        return
    # Any of these permissions qualifies a reviewer for listed uploads.
    listed_perms = [
        amo.permissions.ADDONS_REVIEW,
        amo.permissions.ADDONS_CONTENT_REVIEW,
        amo.permissions.ADDONS_RECOMMENDED_REVIEW,
        amo.permissions.STATIC_THEMES_REVIEW,
        amo.permissions.REVIEWER_TOOLS_VIEW
    ]
    for subscriber in subscribers:
        user = subscriber.user
        active = user and not user.deleted and user.email
        listed_match = (
            subscriber.channel == amo.RELEASE_CHANNEL_LISTED and
            instance.channel == amo.RELEASE_CHANNEL_LISTED and
            any(acl.action_allowed_user(user, perm)
                for perm in listed_perms))
        unlisted_match = (
            subscriber.channel == amo.RELEASE_CHANNEL_UNLISTED and
            instance.channel == amo.RELEASE_CHANNEL_UNLISTED and
            acl.action_allowed_user(
                user, amo.permissions.ADDONS_REVIEW_UNLISTED))
        if active and (listed_match or unlisted_match):
            subscriber.send_notification(instance)
# Notify subscribed reviewers whenever a new version is uploaded.
version_uploaded.connect(send_notifications, dispatch_uid='send_notifications')
class ReviewerScore(ModelBase):
    """Points awarded to a reviewer, automatically for review actions
    (note_key) or manually with a free-form note."""
    id = PositiveAutoField(primary_key=True)
    user = models.ForeignKey(
        UserProfile, related_name='_reviewer_scores', on_delete=models.CASCADE)
    addon = models.ForeignKey(
        Addon, blank=True, null=True, related_name='+',
        on_delete=models.CASCADE)
    version = models.ForeignKey(
        Version, blank=True, null=True, related_name='+',
        on_delete=models.CASCADE)
    score = models.IntegerField()
    # For automated point rewards.
    note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
                                        default=0)
    # For manual point rewards with a note.
    note = models.CharField(max_length=255)

    class Meta:
        db_table = 'reviewer_scores'
        ordering = ('-created',)
        indexes = [
            models.Index(fields=('addon',),
                         name='reviewer_scores_addon_id_fk'),
            models.Index(fields=('created',),
                         name='reviewer_scores_created_idx'),
            models.Index(fields=('user',),
                         name='reviewer_scores_user_id_idx'),
            models.Index(fields=('version',),
                         name='reviewer_scores_version_id'),
        ]
@classmethod
def get_key(cls, key=None, invalidate=False):
    """Build a namespaced cache key for `key`, or — when `key` is falsy —
    just (maybe) invalidate the whole 'riscore' namespace."""
    namespace = 'riscore'
    if not key:
        # Assuming we're invalidating the namespace.
        cache_ns_key(namespace, invalidate)
        return
    # Using cache_ns_key so each cache val is invalidated together.
    ns_key = cache_ns_key(namespace, invalidate)
    return '%s:%s' % (ns_key, key)
@classmethod
def get_event(cls, addon, status, version=None, post_review=False,
              content_review=False):
    """Return the review event type constant.

    This is determined by the addon.type and the queue the addon is
    currently in (which is determined from the various parameters sent
    down from award_points()).

    Note: We're not using addon.status or addon.current_version because
    this is called after the status/current_version might have been updated
    by the reviewer action.

    Returns the matching `amo.REVIEWED_*` constant value, or None when no
    constant applies.
    """
    reviewed_score_name = None
    if content_review:
        # Content review always gives the same amount of points.
        reviewed_score_name = 'REVIEWED_CONTENT_REVIEW'
    elif post_review:
        # There are 4 tiers of post-review scores depending on the addon
        # weight.
        try:
            if version is None:
                raise AutoApprovalSummary.DoesNotExist
            weight = version.autoapprovalsummary.weight
        except AutoApprovalSummary.DoesNotExist as exception:
            log.exception(
                'No such version/auto approval summary when determining '
                'event type to award points: %r', exception)
            # Fall back to the lowest-risk tier when no summary exists.
            weight = 0
        if addon.type == amo.ADDON_DICT:
            reviewed_score_name = 'REVIEWED_DICT_FULL'
        elif addon.type in [amo.ADDON_LPAPP, amo.ADDON_LPADDON]:
            reviewed_score_name = 'REVIEWED_LP_FULL'
        elif addon.type == amo.ADDON_SEARCH:
            reviewed_score_name = 'REVIEWED_SEARCH_FULL'
        elif weight > amo.POST_REVIEW_WEIGHT_HIGHEST_RISK:
            reviewed_score_name = 'REVIEWED_EXTENSION_HIGHEST_RISK'
        elif weight > amo.POST_REVIEW_WEIGHT_HIGH_RISK:
            reviewed_score_name = 'REVIEWED_EXTENSION_HIGH_RISK'
        elif weight > amo.POST_REVIEW_WEIGHT_MEDIUM_RISK:
            reviewed_score_name = 'REVIEWED_EXTENSION_MEDIUM_RISK'
        else:
            reviewed_score_name = 'REVIEWED_EXTENSION_LOW_RISK'
    else:
        # Regular review: derive the queue name from the add-on status.
        if status == amo.STATUS_NOMINATED:
            queue = 'FULL'
        elif status == amo.STATUS_APPROVED:
            queue = 'UPDATE'
        else:
            queue = ''
        if (addon.type in [amo.ADDON_EXTENSION, amo.ADDON_PLUGIN,
                           amo.ADDON_API] and queue):
            reviewed_score_name = 'REVIEWED_ADDON_%s' % queue
        elif addon.type == amo.ADDON_DICT and queue:
            reviewed_score_name = 'REVIEWED_DICT_%s' % queue
        elif addon.type in [amo.ADDON_LPAPP, amo.ADDON_LPADDON] and queue:
            reviewed_score_name = 'REVIEWED_LP_%s' % queue
        elif addon.type == amo.ADDON_STATICTHEME:
            reviewed_score_name = 'REVIEWED_STATICTHEME'
        elif addon.type == amo.ADDON_SEARCH and queue:
            reviewed_score_name = 'REVIEWED_SEARCH_%s' % queue
    if reviewed_score_name:
        return getattr(amo, reviewed_score_name)
    return None
@classmethod
def award_points(cls, user, addon, status, version=None,
post_review=False, content_review=False,
extra_note=''):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
`version` is the `Version` object that was affected by the review.
`post_review` is set to True if the add-on was auto-approved and the
reviewer is confirming/rejecting post-approval.
`content_review` is set to True if it's a content-only review of an
auto-approved add-on.
"""
# If a webextension file gets approved manually (e.g. because
# auto-approval is disabled), 'post-review' is set to False, treating
# the file as a legacy file which is not what we want. The file is
# still a webextension and should treated as such, regardless of
# auto-approval being disabled or not.
# As a hack, we set 'post_review' to True.
if (version and
version.is_webextension and
addon.type in amo.GROUP_TYPE_ADDON):
post_review = True
user_log.info(
(u'Determining award points for user %s for version %s of addon %s'
% (user, version, addon.id)).encode('utf-8'))
event = cls.get_event(
addon, status, version=version, post_review=post_review,
content_review=content_review)
score = amo.REVIEWED_SCORES.get(event)
user_log.info(
(u'Determined %s award points (event: %s) for user %s for version '
u'%s of addon %s' % (score, event, user, version, addon.id))
.encode('utf-8'))
# Add bonus to reviews greater than our limit to encourage fixing
# old reviews. Does not apply to content-review/post-review at the
# moment, because it would need to be calculated differently.
award_overdue_bonus = (
version and version.nomination and
not post_review and not content_review)
if award_overdue_bonus:
waiting_time_days = (datetime.now() - version.nomination).days
days_over = waiting_time_days - amo.REVIEWED_OVERDUE_LIMIT
if days_over > 0:
bonus = days_over * amo.REVIEWED_OVERDUE_BONUS
score = score + bonus
if score is not None:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event, note=extra_note,
version=version)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s' % (
score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score
@classmethod
def award_moderation_points(cls, user, addon, review_id, undo=False):
"""Awards points to user based on moderated review."""
event = (amo.REVIEWED_ADDON_REVIEW if not undo else
amo.REVIEWED_ADDON_REVIEW_POORLY)
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = list(ReviewerScore.objects.filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5, addon_type=None):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.filter(user=user)
if addon_type is not None:
val.filter(addon__type=addon_type)
val = list(val[:limit])
cache.set(key, val, None)
return val
    @classmethod
    def get_breakdown(cls, user):
        """Returns points broken down by addon type.

        Returns raw ReviewerScore rows annotated with `total` (sum of score
        per addon type) and `atype` (the addon type id), cached indefinitely.
        """
        key = cls.get_key('get_breakdown:%s' % user.id)
        val = cache.get(key)
        if val is not None:
            return val
        # Raw SQL so each returned row carries the per-addon-type aggregate.
        sql = """
            SELECT `reviewer_scores`.*,
                   SUM(`reviewer_scores`.`score`) AS `total`,
                   `addons`.`addontype_id` AS `atype`
            FROM `reviewer_scores`
            LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
            WHERE `reviewer_scores`.`user_id` = %s
            GROUP BY `addons`.`addontype_id`
            ORDER BY `total` DESC
        """
        val = list(ReviewerScore.objects.raw(sql, [user.id]))
        cache.set(key, val, None)
        return val
    @classmethod
    def get_breakdown_since(cls, user, since):
        """
        Returns points broken down by addon type since the given datetime.

        Same shape as get_breakdown(), but restricted to scores created on
        or after `since`, and cached for one hour only.
        """
        key = cls.get_key('get_breakdown:%s:%s' % (user.id, since.isoformat()))
        val = cache.get(key)
        if val is not None:
            return val
        # Raw SQL so each returned row carries the per-addon-type aggregate.
        sql = """
            SELECT `reviewer_scores`.*,
                   SUM(`reviewer_scores`.`score`) AS `total`,
                   `addons`.`addontype_id` AS `atype`
            FROM `reviewer_scores`
            LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
            WHERE `reviewer_scores`.`user_id` = %s AND
                  `reviewer_scores`.`created` >= %s
            GROUP BY `addons`.`addontype_id`
            ORDER BY `total` DESC
        """
        val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
        # Short TTL: `since` is typically a moving window.
        cache.set(key, val, 3600)
        return val
@classmethod
def _leaderboard_list(cls, since=None, types=None, addon_type=None):
"""
Returns base leaderboard list. Each item will be a tuple containing
(user_id, name, total).
"""
reviewers = (UserProfile.objects
.filter(groups__name__startswith='Reviewers: ')
.exclude(groups__name__in=('Admins',
'No Reviewer Incentives'))
.distinct())
qs = (cls.objects
.values_list('user__id')
.filter(user__in=reviewers)
.annotate(total=Sum('score'))
.order_by('-total'))
if since is not None:
qs = qs.filter(created__gte=since)
if types is not None:
qs = qs.filter(note_key__in=types)
if addon_type is not None:
qs = qs.filter(addon__type=addon_type)
users = {reviewer.pk: reviewer for reviewer in reviewers}
return [
(item[0], users.get(item[0], UserProfile()).name, item[1])
for item in qs]
    @classmethod
    def get_leaderboards(cls, user, days=7, types=None, addon_type=None):
        """Returns leaderboards with ranking for the past given days.

        This will return a dict of 3 items::

            {'leader_top': [...],
             'leader_near: [...],
             'user_rank': (int)}

        If the user is not in the leaderboard, or if the user is in the top 5,
        'leader_near' will be an empty list and 'leader_top' will contain 5
        elements instead of the normal 3.
        """
        key = cls.get_key('get_leaderboards:%s' % user.id)
        val = cache.get(key)
        if val is not None:
            return val
        week_ago = date.today() - timedelta(days=days)
        leader_top = []
        leader_near = []
        leaderboard = cls._leaderboard_list(
            since=week_ago, types=types, addon_type=addon_type)
        scores = []
        user_rank = 0  # 0 means "not on the leaderboard".
        in_leaderboard = False
        # Build rank entries (1-based) and find the requesting user's rank.
        for rank, row in enumerate(leaderboard, 1):
            user_id, name, total = row
            scores.append({
                'user_id': user_id,
                'name': name,
                'rank': rank,
                'total': int(total),
            })
            if user_id == user.id:
                user_rank = rank
                in_leaderboard = True
        if not in_leaderboard:
            leader_top = scores[:5]
        else:
            if user_rank <= 5:  # User is in top 5, show top 5.
                leader_top = scores[:5]
            else:
                # Show the top 3 plus a window around the user: the two
                # entries above them and, if present, the one just below.
                leader_top = scores[:3]
                leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
                try:
                    leader_near.append(scores[user_rank])
                except IndexError:
                    pass  # User is last on the leaderboard.
        val = {
            'leader_top': leader_top,
            'leader_near': leader_near,
            'user_rank': user_rank,
        }
        cache.set(key, val, None)
        return val
    @classmethod
    def all_users_by_score(cls):
        """
        Returns reviewers ordered by highest total points first.

        Each entry is a dict with user_id, name, total and level; the level
        label is only displayed once per level (on its lowest-ranked holder).
        """
        leaderboard = cls._leaderboard_list()
        scores = []
        for row in leaderboard:
            user_id, name, total = row
            # Find the highest REVIEWED_LEVELS tier whose points threshold
            # the user has reached; -1 means below the first tier.
            user_level = len(amo.REVIEWED_LEVELS) - 1
            for i, level in enumerate(amo.REVIEWED_LEVELS):
                if total < level['points']:
                    user_level = i - 1
                    break
            # Only show level if it changes.
            if user_level < 0:
                level = ''
            else:
                level = str(amo.REVIEWED_LEVELS[user_level]['name'])
            scores.append({
                'user_id': user_id,
                'name': name,
                'total': int(total),
                'level': level,
            })
        prev = None
        # Walk from the bottom up, blanking repeated level labels so each
        # level name appears only once.
        for score in reversed(scores):
            if score['level'] == prev:
                score['level'] = ''
            else:
                prev = score['level']
        return scores
class AutoApprovalNotEnoughFilesError(Exception):
    """Raised by AutoApprovalSummary.create_summary_for_version() when the
    version has no files to evaluate."""
    pass
class AutoApprovalNoValidationResultError(Exception):
    """Raised when a file of the version being evaluated has no linter
    validation result (FileValidation) to inspect."""
    pass
class AutoApprovalSummary(ModelBase):
    """Model holding the results of an auto-approval attempt on a Version."""
    # One summary per version; the version is also the primary key.
    version = models.OneToOneField(
        Version, on_delete=models.CASCADE, primary_key=True)
    is_locked = models.BooleanField(
        default=False,
        help_text=_('Is locked by a reviewer'))
    has_auto_approval_disabled = models.BooleanField(
        default=False,
        help_text=_('Has auto-approval disabled/delayed flag set'))
    is_promoted_prereview = models.BooleanField(
        default=False,
        null=True,  # TODO: remove this once code has deployed to prod.
        help_text=_('Is in a promoted addon group that requires pre-review'))
    should_be_delayed = models.BooleanField(
        default=False,
        help_text=_("Delayed because it's the first listed version"))
    is_blocked = models.BooleanField(
        default=False,
        help_text=_('Version string and guid match a blocklist Block'))
    verdict = models.PositiveSmallIntegerField(
        choices=amo.AUTO_APPROVAL_VERDICT_CHOICES,
        default=amo.NOT_AUTO_APPROVED)
    # Aggregate risk weight and its per-factor breakdown (see
    # calculate_weight()).
    weight = models.IntegerField(default=0)
    weight_info = JSONField(default=dict, null=True)
    # None until a human confirms/rejects the auto-approval.
    confirmed = models.NullBooleanField(default=None)
    class Meta:
        db_table = 'editors_autoapprovalsummary'
    # List of fields to check when determining whether a version should be
    # auto-approved or not. Each should be a boolean, a value of true means
    # the version will *not* be auto-approved. Each should have a
    # corresponding check_<reason>(version) classmethod defined that will be
    # used by create_summary_for_version() to set the corresponding field on
    # the instance.
    auto_approval_verdict_fields = (
        'has_auto_approval_disabled',
        'is_locked',
        'is_promoted_prereview',
        'should_be_delayed',
        'is_blocked',
    )
    def __str__(self):
        return u'%s %s' % (self.version.addon.name, self.version)
    def calculate_weight(self):
        """Calculate the weight value for this version according to various
        risk factors, setting the weight (an integer) and weight_info (a dict
        of risk factors strings -> integer values) properties on the instance.
        The weight value is then used in reviewer tools to prioritize add-ons
        in the auto-approved queue, the weight_info shown to reviewers in the
        review page."""
        addon = self.version.addon
        # Use the summary's creation date as reference point when it exists,
        # so recalculations stay stable; fall back to now for new instances.
        one_year_ago = (self.created or datetime.now()) - timedelta(days=365)
        six_weeks_ago = (self.created or datetime.now()) - timedelta(days=42)
        self.weight_info = {
            # Add-ons under admin code review: 100 added to weight.
            'admin_code_review': 100 if addon.needs_admin_code_review else 0,
            # Each abuse reports for the add-on or one of the listed developers
            # in the last 6 weeks adds 15 to the weight, up to a maximum of
            # 100.
            'abuse_reports': min(
                AbuseReport.objects
                .filter(Q(addon=addon) | Q(user__in=addon.listed_authors))
                .filter(created__gte=six_weeks_ago).count() * 15, 100),
            # 1% of the total of "recent" ratings with a score of 3 or less
            # adds 2 to the weight, up to a maximum of 100.
            'negative_ratings': min(int(
                Rating.objects
                .filter(addon=addon)
                .filter(rating__lte=3, created__gte=one_year_ago)
                .count() / 100.0 * 2.0), 100),
            # Reputation is set by admin - the value is inverted to add from
            # -300 (decreasing priority for "trusted" add-ons) to 0.
            'reputation': (
                max(min(int(addon.reputation or 0) * -100, 0), -300)),
            # Average daily users: value divided by 10000 is added to the
            # weight, up to a maximum of 100.
            'average_daily_users': min(
                addon.average_daily_users // 10000, 100),
            # Past rejection history: each "recent" rejected version (disabled
            # with an original status of null, so not disabled by a developer)
            # adds 10 to the weight, up to a maximum of 100.
            'past_rejection_history': min(
                Version.objects
                .filter(addon=addon,
                        files__reviewed__gte=one_year_ago,
                        files__original_status=amo.STATUS_NULL,
                        files__status=amo.STATUS_DISABLED)
                .distinct().count() * 10, 100),
        }
        self.weight_info.update(
            self.calculate_static_analysis_weight_factors())
        self.weight = sum(self.weight_info.values())
        return self.weight_info
    def calculate_static_analysis_weight_factors(self):
        """Calculate the static analysis risk factors, returning a dict of
        risk factors.
        Used by calculate_weight()."""
        try:
            innerhtml_count = self.count_uses_innerhtml(self.version)
            unknown_minified_code_count = (
                self.count_uses_unknown_minified_code(self.version))
            factors = {
                # Static analysis flags from linter:
                # eval() or document.write(): 50.
                'uses_eval_or_document_write': (
                    50 if self.count_uses_eval_or_document_write(self.version)
                    else 0),
                # Implied eval in setTimeout/setInterval/ on* attributes: 5.
                'uses_implied_eval': (
                    5 if self.count_uses_implied_eval(self.version)
                    else 0),
                # innerHTML / unsafe DOM: 50+10 per instance.
                'uses_innerhtml': (
                    50 + 10 * (innerhtml_count - 1) if innerhtml_count else 0),
                # custom CSP: 90.
                'uses_custom_csp': (
                    90 if self.count_uses_custom_csp(self.version)
                    else 0),
                # nativeMessaging permission: 100.
                'uses_native_messaging': (
                    100 if self.check_uses_native_messaging(self.version)
                    else 0),
                # remote scripts: 100.
                'uses_remote_scripts': (
                    100 if self.count_uses_remote_scripts(self.version)
                    else 0),
                # violates mozilla conditions of use: 20.
                'violates_mozilla_conditions': (
                    20 if self.count_violates_mozilla_conditions(self.version)
                    else 0),
                # libraries of unreadable code: 100+10 per instance.
                'uses_unknown_minified_code': (
                    100 + 10 * (unknown_minified_code_count - 1)
                    if unknown_minified_code_count else 0),
                # Size of code changes: 5kB is one point, up to a max of 100.
                'size_of_code_changes': min(
                    self.calculate_size_of_code_changes() // 5000, 100),
                # Seems to be using a coinminer: 2000
                'uses_coinminer': (
                    2000 if self.count_uses_uses_coinminer(self.version)
                    else 0),
            }
        except AutoApprovalNoValidationResultError:
            # We should have a FileValidationResult... since we don't and
            # something is wrong, increase the weight by 500.
            factors = {
                'no_validation_result': 500,
            }
        return factors
    def get_pretty_weight_info(self):
        """Returns a list of strings containing weight information."""
        if self.weight_info:
            # Only keep non-zero factors, sorted for stable display.
            weight_info = sorted(['%s: %d' % (k, v)
                                  for k, v in self.weight_info.items() if v])
        else:
            weight_info = [ugettext('Risk breakdown not available.')]
        return weight_info
    def find_previous_confirmed_version(self):
        """Return the most recent version in the add-on history that has been
        confirmed, excluding the one this summary is about, or None if there
        isn't one."""
        addon = self.version.addon
        try:
            version = addon.versions.exclude(pk=self.version.pk).filter(
                autoapprovalsummary__confirmed=True).latest()
        except Version.DoesNotExist:
            version = None
        return version
    def calculate_size_of_code_changes(self):
        """Return the size of code changes between the version being
        approved and the previous public one.

        Raises AutoApprovalNoValidationResultError if either version lacks
        a validation result."""
        def find_code_size(version):
            # There could be multiple files: if that's the case, take the
            # total for all files and divide it by the number of files.
            number_of_files = len(version.all_files) or 1
            total_code_size = 0
            for file_ in version.all_files:
                data = json.loads(file_.validation.validation)
                total_code_size += (
                    data.get('metadata', {}).get('totalScannedFileSize', 0))
            return total_code_size // number_of_files
        try:
            old_version = self.find_previous_confirmed_version()
            old_size = find_code_size(old_version) if old_version else 0
            new_size = find_code_size(self.version)
        except FileValidation.DoesNotExist:
            raise AutoApprovalNoValidationResultError()
        # We don't really care about whether it's a negative or positive change
        # in size, we just need the absolute value (if there is no current
        # public version, that value ends up being the total code size of the
        # version we're approving).
        return abs(old_size - new_size)
    def calculate_verdict(self, dry_run=False, pretty=False):
        """Calculate the verdict for this instance based on the values set
        on it previously and the current configuration.
        Return a dict containing more information about what critera passed
        or not."""
        if dry_run:
            success_verdict = amo.WOULD_HAVE_BEEN_AUTO_APPROVED
            failure_verdict = amo.WOULD_NOT_HAVE_BEEN_AUTO_APPROVED
        else:
            success_verdict = amo.AUTO_APPROVED
            failure_verdict = amo.NOT_AUTO_APPROVED
        # Any single true flag among the verdict fields blocks auto-approval.
        verdict_info = {
            key: bool(getattr(self, key))
            for key in self.auto_approval_verdict_fields
        }
        if any(verdict_info.values()):
            self.verdict = failure_verdict
        else:
            self.verdict = success_verdict
        if pretty:
            verdict_info = self.verdict_info_prettifier(verdict_info)
        return verdict_info
    @classmethod
    def verdict_info_prettifier(cls, verdict_info):
        """Return a generator of strings representing the a verdict_info
        (as computed by calculate_verdict()) in human-readable form.

        Uses each flagged field's help_text as its label."""
        return (
            str(cls._meta.get_field(key).help_text)
            for key, value in sorted(verdict_info.items())
            if value
        )
    @classmethod
    def _count_linter_flag(cls, version, flag):
        """Return the highest count of linter messages containing `flag`
        across all files of `version`.

        Raises AutoApprovalNoValidationResultError for files without a
        validation result."""
        def _count_linter_flag_in_file(file_):
            try:
                validation = file_.validation
            except FileValidation.DoesNotExist:
                raise AutoApprovalNoValidationResultError()
            validation_data = json.loads(validation.validation)
            return sum(flag in message['id']
                       for message in validation_data.get('messages', []))
        return max(_count_linter_flag_in_file(file_)
                   for file_ in version.all_files)
    @classmethod
    def _count_metadata_property(cls, version, prop):
        """Return the largest length of linter metadata list `prop` across
        all files of `version`.

        Raises AutoApprovalNoValidationResultError for files without a
        validation result."""
        def _count_property_in_linter_metadata_in_file(file_):
            try:
                validation = file_.validation
            except FileValidation.DoesNotExist:
                raise AutoApprovalNoValidationResultError()
            validation_data = json.loads(validation.validation)
            return len(validation_data.get(
                'metadata', {}).get(prop, []))
        return max(_count_property_in_linter_metadata_in_file(file_)
                   for file_ in version.all_files)
    @classmethod
    def count_uses_unknown_minified_code(cls, version):
        # Minified files the linter could not match to a known library.
        return cls._count_metadata_property(version, 'unknownMinifiedFiles')
    @classmethod
    def count_violates_mozilla_conditions(cls, version):
        return cls._count_linter_flag(version, 'MOZILLA_COND_OF_USE')
    @classmethod
    def count_uses_remote_scripts(cls, version):
        return cls._count_linter_flag(version, 'REMOTE_SCRIPT')
    @classmethod
    def count_uses_eval_or_document_write(cls, version):
        return (
            cls._count_linter_flag(version, 'NO_DOCUMENT_WRITE') or
            cls._count_linter_flag(version, 'DANGEROUS_EVAL'))
    @classmethod
    def count_uses_implied_eval(cls, version):
        return cls._count_linter_flag(version, 'NO_IMPLIED_EVAL')
    @classmethod
    def count_uses_innerhtml(cls, version):
        return cls._count_linter_flag(version, 'UNSAFE_VAR_ASSIGNMENT')
    @classmethod
    def count_uses_custom_csp(cls, version):
        return cls._count_linter_flag(version, 'MANIFEST_CSP')
    @classmethod
    def count_uses_uses_coinminer(cls, version):
        return cls._count_linter_flag(version, 'COINMINER_USAGE_DETECTED')
    @classmethod
    def check_uses_native_messaging(cls, version):
        # True if any file requests the nativeMessaging permission.
        return any('nativeMessaging' in file_.permissions
                   for file_ in version.all_files)
    @classmethod
    def check_is_locked(cls, version):
        """Check whether the add-on is locked by a reviewer.
        Doesn't apply to langpacks, which are submitted as part of Firefox
        release process and should always be auto-approved."""
        is_langpack = version.addon.type == amo.ADDON_LPAPP
        locked_by = get_reviewing_cache(version.addon.pk)
        # A lock held by the task user doesn't count as a human lock.
        return (
            not is_langpack and
            bool(locked_by) and
            locked_by != settings.TASK_USER_ID)
    @classmethod
    def check_has_auto_approval_disabled(cls, version):
        """Check whether the add-on has auto approval fully disabled by a
        reviewer (only applies to listed) or disabled temporarily because they
        had their previous version on a delayed rejection (only applies to
        listed) or automated scanners (applies to every channel).
        """
        addon = version.addon
        is_listed = version.channel == amo.RELEASE_CHANNEL_LISTED
        auto_approval_disabled = is_listed and bool(
            addon.auto_approval_disabled or
            addon.auto_approval_disabled_until_next_approval
        )
        auto_approval_delayed = bool(
            addon.auto_approval_delayed_until and
            datetime.now() < addon.auto_approval_delayed_until
        )
        return auto_approval_disabled or auto_approval_delayed
    @classmethod
    def check_is_promoted_prereview(cls, version):
        """Check whether the add-on is a promoted addon group that requires
        pre-review.
        Only applies to listed versions."""
        if not version.channel == amo.RELEASE_CHANNEL_LISTED:
            return False
        promo_group = version.addon.promoted_group(currently_approved=False)
        return bool(promo_group and promo_group.pre_review)
    @classmethod
    def check_should_be_delayed(cls, version):
        """Check whether the add-on new enough that the auto-approval of the
        version should be delayed for 24 hours to catch spam.
        Doesn't apply to langpacks, which are submitted as part of Firefox
        release process and should always be auto-approved.
        Only applies to listed versions.
        """
        addon = version.addon
        is_langpack = addon.type == amo.ADDON_LPAPP
        now = datetime.now()
        nomination = version.nomination or addon.created
        try:
            content_review = addon.addonapprovalscounter.last_content_review
        except AddonApprovalsCounter.DoesNotExist:
            content_review = None
        return (
            not is_langpack and
            version.channel == amo.RELEASE_CHANNEL_LISTED and
            version.addon.status == amo.STATUS_NOMINATED and
            now - nomination < timedelta(hours=24) and
            content_review is None)
    @classmethod
    def check_is_blocked(cls, version):
        """Check if the version matches a Block in the blocklist. Such uploads
        would have been prevented, but if it was uploaded before the Block was
        created, it's possible it'll still be pending."""
        return version.is_blocked
    @classmethod
    def create_summary_for_version(cls, version, dry_run=False):
        """Create a AutoApprovalSummary instance in db from the specified
        version.
        Return a tuple with the AutoApprovalSummary instance as first item,
        and a dict containing information about the auto approval verdict as
        second item.
        If dry_run parameter is True, then the instance is created/updated
        normally but when storing the verdict the WOULD_ constants are used
        instead.
        If not using dry_run it's the caller responsability to approve the
        version to make sure the AutoApprovalSummary is not overwritten later
        when the auto-approval process fires again."""
        if len(version.all_files) == 0:
            raise AutoApprovalNotEnoughFilesError()
        # Run every check_<field>() classmethod to fill the verdict fields.
        data = {
            field: getattr(cls, f'check_{field}')(version)
            for field in cls.auto_approval_verdict_fields
        }
        instance = cls(version=version, **data)
        verdict_info = instance.calculate_verdict(dry_run=dry_run)
        instance.calculate_weight()
        # We can't do instance.save(), because we want to handle the case where
        # it already existed. So we put the verdict and weight we just
        # calculated in data and use update_or_create().
        data['verdict'] = instance.verdict
        data['weight'] = instance.weight
        data['weight_info'] = instance.weight_info
        instance, _ = cls.objects.update_or_create(
            version=version, defaults=data)
        return instance, verdict_info
class Whiteboard(ModelBase):
    """Private and public free-form whiteboard text attached to an add-on
    (stored in the review_whiteboard table)."""
    # One whiteboard per add-on; the add-on is also the primary key.
    addon = models.OneToOneField(
        Addon, on_delete=models.CASCADE, primary_key=True)
    private = models.TextField(blank=True)
    public = models.TextField(blank=True)
    class Meta:
        db_table = 'review_whiteboard'
    def __str__(self):
        return u'[%s] private: |%s| public: |%s|' % (
            self.addon.name, self.private, self.public)
|
|
# -*- coding: utf-8 -*-
import json
import logging
import os
import socket
import sys
import tempfile
import traceback
import urllib2
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from celeryutils import task
from django_statsd.clients import statsd
from tower import ugettext as _
import amo
from amo.decorators import write, set_modified_on
from amo.utils import resize_image, send_html_mail_jinja
from addons.models import Addon
from applications.management.commands import dump_apps
from applications.models import AppVersion
from devhub import perf
from files.helpers import copyfileobj
from files.models import FileUpload, File, FileValidation
from PIL import Image
log = logging.getLogger('z.devhub.task')
@task
@write
def validator(upload_id, **kw):
    """Celery task: run the add-on validator on a FileUpload and store the
    JSON result on it.

    On failure the traceback is saved in the upload's `task_error` field and
    the exception is re-raised for normal task logging.
    """
    if not settings.VALIDATE_ADDONS:
        # Validation can be globally switched off via settings.
        return None
    log.info('VALIDATING: %s' % upload_id)
    upload = FileUpload.objects.using('default').get(pk=upload_id)
    try:
        result = run_validator(upload.path)
        upload.validation = result
        upload.save()  # We want to hit the custom save().
    except:
        # Store the error with the FileUpload job, then raise
        # it for normal logging.
        tb = traceback.format_exception(*sys.exc_info())
        upload.update(task_error=''.join(tb))
        raise
@task
@write
def compatibility_check(upload_id, app_guid, appversion_str, **kw):
    """Celery task: validate an upload for compatibility against one
    specific application version, storing the result on the upload.

    On failure the traceback is saved in `task_error` and the exception
    re-raised for normal task logging.
    """
    if not settings.VALIDATE_ADDONS:
        return None
    log.info('COMPAT CHECK for upload %s / app %s version %s'
             % (upload_id, app_guid, appversion_str))
    upload = FileUpload.objects.get(pk=upload_id)
    app = amo.APP_GUIDS.get(app_guid)
    appver = AppVersion.objects.get(application=app.id, version=appversion_str)
    try:
        result = run_validator(
            upload.path,
            for_appversions={app_guid: [appversion_str]},
            test_all_tiers=True,
            # Ensure we only check compatibility against this one specific
            # version:
            overrides={'targetapp_minVersion': {app_guid: appversion_str},
                       'targetapp_maxVersion': {app_guid: appversion_str}},
            compat=True)
        upload.validation = result
        upload.compat_with_app = app.id
        upload.compat_with_appver = appver
        upload.save()  # We want to hit the custom save().
    except:
        # Store the error with the FileUpload job, then raise
        # it for normal logging.
        tb = traceback.format_exception(*sys.exc_info())
        upload.update(task_error=''.join(tb))
        raise
@task
@write
def file_validator(file_id, **kw):
    """Celery task: validate an existing File and persist the result as a
    FileValidation record (returned)."""
    if not settings.VALIDATE_ADDONS:
        return None
    log.info('VALIDATING file: %s' % file_id)
    file = File.objects.get(pk=file_id)
    result = run_validator(file.file_path)
    return FileValidation.from_json(file, result)
def run_validator(file_path, for_appversions=None, test_all_tiers=False,
                  overrides=None, compat=False):
    """A pre-configured wrapper around the addon validator.

    *file_path*
        Path to addon / extension file to validate.

    *for_appversions=None*
        An optional dict of application versions to validate this addon
        for. The key is an application GUID and its value is a list of
        versions.

    *test_all_tiers=False*
        When False (default) the validator will not continue if it
        encounters fatal errors. When True, all tests in all tiers are run.
        See bug 615426 for discussion on this default.

    *overrides=None*
        Normally the validator gets info from install.rdf but there are a
        few things we need to override. See validator for supported overrides.
        Example: {'targetapp_maxVersion': {'<app guid>': '<version>'}}

    *compat=False*
        Set this to `True` when performing a bulk validation. This allows the
        validator to ignore certain tests that should not be run during bulk
        validation (see bug 735841).

    To validate the addon for compatibility with Firefox 5 and 6,
    you'd pass in::

        for_appversions={amo.FIREFOX.guid: ['5.0.*', '6.0.*']}

    Not all application versions will have a set of registered
    compatibility tests.
    """
    from validator.validate import validate
    apps = dump_apps.Command.JSON_PATH
    if not os.path.exists(apps):
        call_command('dump_apps')
    path = file_path
    if path and not os.path.exists(path) and storage.exists(path):
        # The file only exists in remote storage: copy it to a local
        # temporary file so the validator can read it.
        # Security fix: tempfile.mktemp() only returns a *name*, leaving a
        # window in which an attacker can create (or symlink) the path
        # before we open it. mkstemp() atomically creates and opens the
        # file with owner-only permissions.
        fd, path = tempfile.mkstemp(
            suffix='_' + os.path.basename(file_path))
        with os.fdopen(fd, 'wb') as f:
            copyfileobj(storage.open(file_path), f)
        temp = True
    else:
        temp = False
    try:
        with statsd.timer('devhub.validator'):
            return validate(path,
                            for_appversions=for_appversions,
                            format='json',
                            # When False, this flag says to stop testing after
                            # one tier fails.
                            determined=test_all_tiers,
                            approved_applications=apps,
                            spidermonkey=settings.SPIDERMONKEY,
                            overrides=overrides,
                            timeout=settings.VALIDATOR_TIMEOUT,
                            compat_test=compat)
    finally:
        if temp:
            # Clean up the local copy we made.
            os.remove(path)
@task(rate_limit='4/m')
@write
def flag_binary(ids, **kw):
    """Celery task: re-run validation on the given add-ons and update the
    `binary` / `binary_components` flags on their files.

    The `latest` kwarg (default True) restricts the check to each add-on's
    most recent non-disabled file. Failures are logged and skipped so one
    bad add-on doesn't abort the batch.
    """
    log.info('[%s@%s] Flagging binary addons starting with id: %s...'
             % (len(ids), flag_binary.rate_limit, ids[0]))
    addons = Addon.objects.filter(pk__in=ids).no_transforms()
    latest = kw.pop('latest', True)
    for addon in addons:
        try:
            log.info('Validating addon with id: %s' % addon.pk)
            files = (File.objects.filter(version__addon=addon)
                     .exclude(status=amo.STATUS_DISABLED)
                     .order_by('-created'))
            if latest:
                # Only look at the most recent file.
                files = [files[0]]
            for file in files:
                result = json.loads(run_validator(file.file_path))
                metadata = result['metadata']
                binary = (metadata.get('contains_binary_extension', False) or
                          metadata.get('contains_binary_content', False))
                binary_components = metadata.get('binary_components', False)
                log.info('Updating binary flags for addon with id=%s: '
                         'binary -> %s, binary_components -> %s' % (
                             addon.pk, binary, binary_components))
                file.update(binary=binary, binary_components=binary_components)
        except Exception, err:
            # Best-effort batch: log and continue with the next add-on.
            log.error('Failed to run validation on addon id: %s, %s'
                      % (addon.pk, err))
@task
@set_modified_on
def resize_icon(src, dst, size, locally=False, **kw):
    """Resizes addon icons.

    `size` may be a single integer or a list of sizes; with a list, one
    `<dst>-<size>.png` file is written per size. Returns True on success.
    """
    log.info('[1@None] Resizing icon: %s' % dst)
    try:
        if isinstance(size, list):
            for s in size:
                # Keep the source around until all sizes are generated.
                resize_image(src, '%s-%s.png' % (dst, s), (s, s),
                             remove_src=False, locally=locally)
            if locally:
                os.remove(src)
            else:
                storage.delete(src)
        else:
            resize_image(src, dst, (size, size), remove_src=True,
                         locally=locally)
        return True
    except Exception, e:
        # NOTE(review): errors are swallowed after logging (returns None
        # instead of True) — presumably so @set_modified_on only acts on
        # success; confirm before changing.
        log.error("Error saving addon icon: %s" % e)
@task
@set_modified_on
def resize_preview(src, instance, **kw):
    """Resizes preview images and stores the sizes on the preview.

    Writes both the thumbnail and the full image, saves their dimensions
    in `instance.sizes`, and always deletes the temporary source file.
    Returns True on success.
    """
    thumb_dst, full_dst = instance.thumbnail_path, instance.image_path
    sizes = {}
    log.info('[1@None] Resizing preview and storing size: %s' % thumb_dst)
    try:
        sizes['thumbnail'] = resize_image(src, thumb_dst,
                                          amo.ADDON_PREVIEW_SIZES[0],
                                          remove_src=False)
        sizes['image'] = resize_image(src, full_dst,
                                      amo.ADDON_PREVIEW_SIZES[1],
                                      remove_src=False)
        instance.sizes = sizes
        instance.save()
        return True
    except Exception, e:
        # Swallow after logging; the finally clause still cleans up.
        log.error("Error saving preview: %s" % e)
    finally:
        # Finally delete the temporary now useless source file.
        if os.path.exists(src):
            os.unlink(src)
@task
@write
def get_preview_sizes(ids, **kw):
    """Celery task: backfill the `sizes` dict (thumbnail and image pixel
    dimensions) on every preview of the given add-ons."""
    log.info('[%s@%s] Getting preview sizes for addons starting at id: %s...'
             % (len(ids), get_preview_sizes.rate_limit, ids[0]))
    addons = Addon.objects.filter(pk__in=ids).no_transforms()
    for addon in addons:
        previews = addon.previews.all()
        log.info('Found %s previews for: %s' % (previews.count(), addon.pk))
        for preview in previews:
            try:
                log.info('Getting size for preview: %s' % preview.pk)
                sizes = {
                    'thumbnail': Image.open(
                        storage.open(preview.thumbnail_path)).size,
                    'image': Image.open(storage.open(preview.image_path)).size,
                }
                preview.update(sizes=sizes)
            except Exception, err:
                # NOTE(review): this logs addon.pk where preview.pk looks
                # intended — confirm before relying on these log lines.
                log.error('Failed to find size of preview: %s, error: %s'
                          % (addon.pk, err))
@task
@write
def convert_purified(ids, **kw):
    """Celery task: re-clean the `the_reason`/`the_future` translated fields
    and re-save the add-on when cleaning changed the stored string."""
    log.info('[%s@%s] Converting fields to purified starting at id: %s...'
             % (len(ids), convert_purified.rate_limit, ids[0]))
    fields = ['the_reason', 'the_future']
    for addon in Addon.objects.filter(pk__in=ids):
        flag = False
        for field in fields:
            value = getattr(addon, field)
            if value:
                value.clean()
                # Only flag for saving if cleaning actually changed the text.
                if (value.localized_string_clean != value.localized_string):
                    flag = True
        if flag:
            log.info('Saving addon: %s to purify fields' % addon.pk)
            addon.save()
def failed_validation(*messages):
    """Return a validation object that looks like the add-on validator."""
    entries = [{'type': 'error', 'message': message, 'tier': 1}
               for message in messages]
    return json.dumps({'errors': 1, 'success': False, 'messages': entries})
def _fetch_content(url):
    """Open `url` with a 15 second timeout and return the response object.

    HTTP and network errors are re-raised as plain Exceptions carrying a
    localized, user-displayable message.
    """
    try:
        return urllib2.urlopen(url, timeout=15)
    except urllib2.HTTPError, e:
        raise Exception(_('%s responded with %s (%s).') % (url, e.code, e.msg))
    except urllib2.URLError, e:
        # Unpack the URLError to try and find a useful message.
        if isinstance(e.reason, socket.timeout):
            raise Exception(_('Connection to "%s" timed out.') % url)
        elif isinstance(e.reason, socket.gaierror):
            raise Exception(_('Could not contact host at "%s".') % url)
        else:
            raise Exception(str(e.reason))
def check_content_type(response, content_type,
                       no_ct_message, wrong_ct_message):
    """Raise an Exception unless the response's Content-Type header starts
    with `content_type`.

    `wrong_ct_message` is used (formatted with expected and actual type)
    when a Content-Type header is present but wrong; `no_ct_message`
    (formatted with the expected type) when the header is missing.
    """
    actual = response.headers.get('Content-Type', '')
    if actual.startswith(content_type):
        return
    if 'Content-Type' in response.headers:
        raise Exception(wrong_ct_message % (content_type, actual))
    raise Exception(no_ct_message % content_type)
def get_content_and_check_size(response, max_size, error_message):
    """Read the response body and return it, raising an Exception
    (formatted with `max_size`) if it exceeds `max_size` bytes."""
    # Read one extra byte: if it arrives, the payload is over the limit,
    # and we avoid downloading arbitrarily large files.
    payload = response.read(max_size + 1)
    if len(payload) > max_size:
        raise Exception(error_message % max_size)
    return payload
@task
def start_perf_test_for_file(file_id, os_name, app_name, **kw):
    """Celery task: kick off a performance test run for the given file on
    the given OS/application combination."""
    log.info('[@%s] Starting perf tests for file %s on %s / %s'
             % (start_perf_test_for_file.rate_limit, file_id,
                os_name, app_name))
    file_ = File.objects.get(pk=file_id)
    # TODO(Kumar) store token to retrieve results later?
    perf.start_perf_test(file_, os_name, app_name)
@task
def send_welcome_email(addon_pk, emails, context, **kw):
    """Send the post-submission welcome email for an add-on.

    Returns whatever send_html_mail_jinja returns so callers can inspect
    the send result.
    """
    log.info(u'[1@None] Sending welcome email for %s to %s.' %
             (addon_pk, emails))
    app = context.get('app', unicode(amo.FIREFOX.pretty))
    subject = u'Mozilla Add-ons: Thanks for submitting a %s Add-on!' % app
    return send_html_mail_jinja(
        subject,
        'devhub/email/submission.html',
        'devhub/email/submission.txt',
        context,
        recipient_list=emails,
        from_email=settings.NOBODY_EMAIL,
        use_blacklist=False,
        perm_setting='individual_contact',
        headers={'Reply-To': settings.EDITORS_EMAIL})
|
|
import core.modules.module_registry
import core.modules.vistrails_module
from logging import debug, warn
from core.vistrail.connection import Connection
from core.vistrail.port import Port
from PyQt4 import QtGui, QtCore
import api
import vcs
import os
# Identifier of the CDAT VisTrails package that these modules belong to.
cdat_id = "edu.utah.sci.vistrails.cdat"
# Module names
# Note: open_name, quickplot_name, etc... are used in cdat_window.py
# and graphics_method_controller.py
open_name = 'open'
quickplot_name = 'Quickplot'
variable_name = 'Variable'
cdatcell_name = 'CDATCell'
gm_name = 'GraphicsMethod'
# Port names
# Module Namespaces
# Maps each module name to the VisTrails namespace it is registered under.
namespace = {}
namespace[open_name] = 'cdms2'
namespace[quickplot_name] = 'cdat'
namespace[variable_name] = 'cdat'
namespace[cdatcell_name] = 'cdat'
namespace[gm_name] = 'cdat'
# MAJOR TODO: Support for multiple workflows
class Workflow():
    """ A Workflow contains a dict of modules belonging to the workflow. Workflow
    manages the updating of its modules connections / values """

    def __init__(self):
        """ Workflow contains a dictionary of modules in the workflow.
        * Key - if module is a variable then the key is the name of the variable
        otherwise the key is the name of the module. ***** IMPORTANT *****
        * Value - module object
        """
        self.modules = {}
        self.variableNames = []  # Names of the variables in this workflow
        self.filename = None  # Name of the cdms file associated with this workflow

    def moduleExists(self, key):
        """Return True if a module is registered under *key*."""
        # Membership test directly on the dict -- no need to materialize a
        # throwaway list of the keys first.
        return key in self.modules

    def addModule(self, name, module, isVariable=False):
        """Register *module* under *name* in this workflow.

        If the module is a variable, also remember its name and wire its
        'cdmsfile' input to the open module's 'dataset' output.
        """
        self.modules[name] = module
        # If the module is a variable save the name and connect the variable
        # module to the open module
        if isVariable:
            self.variableNames.append(name)
            self.connectPorts(self.modules[open_name], 'dataset', module,
                              'cdmsfile')

    def updatePorts(self, var1, var2):
        """ updatePorts(var1: str, var2: str)
        updatePorts connects the output ports of Variable modules with vars
        var1 and var2 to the input ports of Graphics Method and then connects
        the output of Graphics Method to the input of CDATCell.
        """
        m_open = self.modules[open_name]
        m_variable1 = self.modules[var1]
        m_gm = self.modules[gm_name]
        m_cdatcell = self.modules[cdatcell_name]
        # Connect the variable module being plotted to the graphics method
        # module and store the connection
        self.connectPorts(m_variable1, 'variable', m_gm, 'slab1')
        # Connect graphics method module & cdatcell module
        self.connectPorts(m_gm, 'canvas', m_cdatcell, 'canvas')
        self.connectPorts(m_gm, 'slab1', m_cdatcell, 'slab1')
        # Connect the second variable module (if given) to graphics method
        # and Graphics Method to CDATCell
        if var2 is not None:
            m_var2 = self.modules[var2]
            self.connectPorts(m_var2, 'variable', m_gm, 'slab2')
            self.connectPorts(m_gm, 'slab2', m_cdatcell, 'slab2')

    def connectPorts(self, moduleA, outputPortName, moduleB, inputPortName):
        """ connectPorts(moduleA: Module, outputPortName: str, moduleB: Module,
        inputPortName: str) -> Connection
        connectPorts connects moduleA's outputPort to moduleB's inputPort.
        """
        reg = api.get_module_registry()
        # Look up both port specs before connecting.  The results are unused;
        # presumably get_port_spec raises for unknown port names and these
        # lookups act as validation -- TODO confirm and simplify if not.
        out_spec = moduleA.get_port_spec(outputPortName, 'output')
        in_spec = moduleB.get_port_spec(inputPortName, 'input')
        return api.add_connection(moduleA.id, outputPortName, moduleB.id,
                                  inputPortName)

    def updateModule(self, name, portName, value):
        """ updateModule(name: str, portName: str, value: *)
        updateModule updates the vistrail module given the module name, input
        port name, and value
        """
        if name not in self.modules:
            return
        # Set the filename associated w/ this workflow
        if name == open_name:
            self.filename = value
        module = self.modules[name]
        api.get_current_controller().update_function(module, portName, [value])

    def updateModuleOps(self, name, args):
        """ updateModuleOps(name: str, args: list)
        updateModuleOps updates the vistrail module given the module name and a
        list of tuples where each tuple = (input port name, value)
        """
        if name not in self.modules:
            return
        module = self.modules[name]
        for portName, value in args:
            api.get_current_controller().update_function(module, portName,
                                                         [value])
class GuiController(QtCore.QObject):
    """ GuiController calls vistrails functions and handles recording and
    displaying teaching commands.
    The two most important functions that GuiController provides are:
    'createModule' and 'updateModule'. 'createModule' creates a new box.
    'updateModule' updates the input of a box. Widgets interact with
    GuiController by sending signals (which is why GuiController inherits
    from QObject).
    """

    def __init__(self, fileWidget, defVarWidget, varWidget):
        """ __init__(fileWidget: QCDATFileWidget, defVarWidget:QDefinedVariable,
        varWidget: QVariableView)
        """
        QtCore.QObject.__init__(self)
        # Accumulated script text mirroring the user's actions; written to
        # disk by writeTeachingCommands().
        self.teachingCommands = ''
        # PID of the forked editor process (0 when no editor is open).
        self.editorPid = 0
        # When False, recordTeachingCommand() discards incoming commands.
        self.recordCommands = True
        self.workflows = []  # List of workflows
        # X coordinates of open, variable, and plot related modules
        self.openX = -300
        self.variableX = -300
        self.plotX = -320
        self.m_open = None
        self.m_variable = None
        self.m_graphics_method = None
        self.m_cdat_cell = None
        # Connect the 3 main widgets to the primary GuiController
        # functionality. If a childwidget of the 3 main widgets wants to
        # send signals to GuiController, then it must send it through one of the
        # main widgets to try to keep things less messy
        self.connect(fileWidget, QtCore.SIGNAL('createModule'),
                     self.createModule)
        self.connect(fileWidget, QtCore.SIGNAL('updateModule'),
                     self.updateModule)
        self.connect(fileWidget, QtCore.SIGNAL('recordTeachingCommand'),
                     self.recordTeachingCommand)
        self.connect(defVarWidget, QtCore.SIGNAL('createModule'),
                     self.createModule)
        self.connect(defVarWidget, QtCore.SIGNAL('updateModule'),
                     self.updateModule)
        self.connect(defVarWidget, QtCore.SIGNAL('recordTeachingCommand'),
                     self.recordTeachingCommand)
        self.connect(varWidget, QtCore.SIGNAL('recordTeachingCommand'),
                     self.recordTeachingCommand)
        self.connect(varWidget, QtCore.SIGNAL('createModule'),
                     self.createModule)
        self.connect(varWidget, QtCore.SIGNAL('updateModule'),
                     self.updateModule)
        self.connect(varWidget, QtCore.SIGNAL('updateModuleOps'),
                     self.updateModuleOps)
        self.connect(varWidget, QtCore.SIGNAL('plot'),
                     self.plot)

    def createNewWorkflow(self):
        """ createnewWorkflow(filename: str)
        createNewWorkflow is called when the user selects a new file. It makes
        a new workflow and appends it to the workflow list.
        """
        # Reset the controller to the root version only for the very first
        # workflow.
        if self.workflows == []:
            api.get_current_controller().change_selected_version(0)
        self.currentWorkflow = Workflow()
        self.workflows.append(self.currentWorkflow)

    def getCoordinates(self, name):
        """ getCoordinates(name: str) -> x: int, y: int
        Return the x, y coordinate of where to place the new module given the
        name.
        """
        # Each module kind advances its own running x offset so successive
        # modules of the same kind are laid out side by side.
        if name == open_name:
            self.openX += 100
            return self.openX, 170
        elif name == variable_name or name == quickplot_name:
            self.variableX += 120
            return self.variableX, 70
        elif name == gm_name:
            self.plotX += 160
            return self.plotX, -70
        elif name == cdatcell_name:
            return self.plotX, -170
        # NOTE(review): an unrecognized name falls through and returns None,
        # which would break tuple unpacking in createModule -- verify callers
        # only pass the known module names.

    def createModule(self, name, key=None):
        """ createModule(name: str, key: str)
        createModule creates a new module given the name and adds it to the
        current workflow. If key is specified, use the key instead of the name
        as the key into the workflow's module dictionary.
        Note: For Variable modules I pass key = variabe name (the tabName)
        """
        # If module is the 'open' module, create a new workflow
        if name == open_name:
            self.createNewWorkflow()
        # If module is 'quickplot' module, and it already exists, do nothing
        if name == quickplot_name:
            if self.currentWorkflow.moduleExists(quickplot_name.lower()):
                return
        # Create & add the module
        x, y = self.getCoordinates(name)
        module = api.add_module(x, y, cdat_id, name, namespace[name])
        if key is not None:
            self.currentWorkflow.addModule(key, module, isVariable=True)
        else:
            self.currentWorkflow.addModule(name, module)

    def updateModule(self, name, portName, value):
        """ updateModule(name: str, portName: str, value: *)
        updateModule updates the vistrail module given the module name, input
        port name, and value
        """
        # Delegates to the current workflow.
        self.currentWorkflow.updateModule(name, portName, value)

    def updateModuleOps(self, name, args):
        """ updateModule(name: str, args: list)
        updateModule updates the vistrail module given the module name and a
        list of tuples where each tuple = (input port name, value)
        """
        self.currentWorkflow.updateModuleOps(name, args)

    def plot(self, var1, var2):
        """ Connect / disconnect the necessary ports and exec workflow -> plot
        into the cell
        """
        self.currentWorkflow.updatePorts(var1, var2)
        api.get_current_controller().execute_current_workflow()

    def initTeachingCommands(self):
        """ The initial teaching commands still have 4 canvases like the old
        vcdat. This allows you to run the teaching commands independently
        of vistrails' spreadsheets.
        """
        self.teachingCommands += 'import cdms2, vcs, cdutil, genutil, os, sys\n'
        self.teachingCommands += 'import MV2\n'
        self.teachingCommands += '\n# Initialize the four VCS Canvases by creating\n'
        self.teachingCommands += '# a list to hold the 4 VCS Canvas\n'
        self.teachingCommands += 'vcs_canvas_list = []\n'
        self.teachingCommands += '\n# Loop (from 0 to 3) to create VCS Canvas 1, 2, 3, and 4\n'
        self.teachingCommands += 'for i in range(4):\n'
        self.teachingCommands += '   vcs_canvas_list.append(vcs.init())\n'
        self.teachingCommands += '\n# Set the Command Line VCS Canvas hooks\n'
        self.teachingCommands += 'vcs_hook1 = vcs_canvas_list[0]\n'
        self.teachingCommands += 'vcs_hook2 = vcs_canvas_list[1]\n'
        self.teachingCommands += 'vcs_hook3 = vcs_canvas_list[2]\n'
        self.teachingCommands += 'vcs_hook4 = vcs_canvas_list[3]\n'
        self.writeTeachingCommands()

    def recordTeachingCommand(self, command):
        # Append and persist the command, unless recording is disabled.
        if (self.recordCommands == True):
            self.teachingCommands += command
            self.writeTeachingCommands()

    def setRecordCommands(self, recordCommands):
        # Toggle whether recordTeachingCommand() keeps incoming commands.
        self.recordCommands = recordCommands

    def writeTeachingCommands(self):
        """Write the accumulated teaching commands to
        $HOME/PCMDI_GRAPHICS/vcdat_recording_script_file.py."""
        # NOTE(review): `sys` is not among this module's visible imports, so
        # the sys.exit() calls below would raise NameError -- verify the
        # imports at the top of the file.
        try:
            fn = '%s/PCMDI_GRAPHICS' % os.environ['HOME']
        except:
            print "Could not find the $HOME directory. Set your environment variable 'HOME'"
            print "to your home directory. (e.g., 'setenv HOME /home/user')."
            sys.exit()
        # Create PCMDI_GRAPHICS directory if it does not exist
        if (os.access(fn, os.X_OK) == 0):
            try:
                os.mkdir(fn)
            except:
                print 'Do not have write permission for home directory. Must have write permissions.'
                sys.exit()
        # Write teaching commands to vcdat_recording_script_file.py
        self.teachingScript = fn + '/vcdat_recording_script_file.py'
        # NOTE(review): the handle is flushed but never closed; it leaks
        # until garbage collection.  `file` also shadows the builtin.
        file = open(self.teachingScript, 'w')
        file.write(self.teachingCommands)
        file.flush()

    def viewTeachingCommands(self):
        """ Open the teaching commands script in a child process """
        # Fork: the child execs an editor on the script; the parent keeps
        # the child's PID so closeTeachingCommands() can kill it later.
        self.editorPid = os.fork()
        if (self.editorPid == 0):
            editor = 'idle'
            # If idle editor is found, view teaching commands with idle
            for path in os.environ["PATH"].split(os.pathsep):
                file = os.path.join(path, editor)
                if (os.path.exists(file)):
                    args = (editor, self.teachingScript)
                    os.execvp(editor, args)
                    return
            # If idle editor is not found, use default editor
            secondaryEditor = os.environ['EDITOR']
            args = (secondaryEditor, self.teachingScript)
            os.execvp(secondaryEditor, args)

    def closeTeachingCommands(self):
        # Kill the forked editor process, if one is running.
        if (self.editorPid != 0):
            os.kill(self.editorPid, 9)
            self.editorPid = 0
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("ply")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("mojo"), "third_party"))
from ply.lex import TOKEN
from ..error import Error
class LexError(Error):
  """Class for errors from the lexer."""

  def __init__(self, filename, message, lineno):
    # Delegate to the shared Error base, tagging the source location.
    super(LexError, self).__init__(filename, message, lineno=lineno)
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Lexer(object):
  # Tokenizer definition for the mojom IDL, consumed by PLY.
  #
  # NOTE: PLY derives behavior from this class's structure:
  #   * the docstring (or @TOKEN-attached regex) of each t_* method IS that
  #     token's regular expression, and
  #   * the definition order of the t_* methods sets matching precedence.
  # Do not rename or reorder the t_* rules, or edit rule docstrings,
  # without understanding those PLY conventions.

  def __init__(self, filename):
    # Kept only so lexing errors can report which file they came from.
    self.filename = filename

  ######################--   PRIVATE   --######################

  ##
  ## Internal auxiliary methods
  ##
  def _error(self, msg, token):
    # Raise a LexError tagged with this lexer's filename and the token's line.
    raise LexError(self.filename, msg, token.lineno)

  ##
  ## Reserved keywords
  ##
  keywords = (
    'HANDLE',
    'IMPORT',
    'MODULE',
    'STRUCT',
    'INTERFACE',
    'ENUM',
    'CONST',
    'TRUE',
    'FALSE',
    'DEFAULT',
  )

  # Maps the lowercase spelling (as it appears in source) to the token name.
  keyword_map = {}
  for keyword in keywords:
    keyword_map[keyword.lower()] = keyword

  ##
  ## All the tokens recognized by the lexer
  ##
  tokens = keywords + (
    # Identifiers
    'NAME',

    # Constants
    'ORDINAL',
    'INT_CONST_DEC', 'INT_CONST_HEX',
    'FLOAT_CONST',

    # String literals
    'STRING_LITERAL',

    # Operators
    'MINUS',
    'PLUS',
    'AMP',
    'QSTN',

    # Assignment
    'EQUALS',

    # Request / response
    'RESPONSE',

    # Delimiters
    'LPAREN', 'RPAREN',         # ( )
    'LBRACKET', 'RBRACKET',     # [ ]
    'LBRACE', 'RBRACE',         # { }
    'LANGLE', 'RANGLE',         # < >
    'SEMI',                     # ;
    'COMMA', 'DOT'              # , .
  )

  ##
  ## Regexes for use in tokens
  ##

  # valid C identifiers (K&R2: A.2.3)
  identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'

  hex_prefix = '0[xX]'
  hex_digits = '[0-9a-fA-F]+'

  # integer constants (K&R2: A.2.5.1)
  decimal_constant = '0|([1-9][0-9]*)'
  hex_constant = hex_prefix+hex_digits
  # Don't allow octal constants (even invalid octal).
  octal_constant_disallowed = '0[0-9]+'

  # character constants (K&R2: A.2.5.2)
  # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
  # directives with Windows paths as filenames (..\..\dir\file)
  # For the same reason, decimal_escape allows all digit sequences. We want to
  # parse all correct code, even if it means to sometimes parse incorrect
  # code.
  #
  simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
  decimal_escape = r"""(\d+)"""
  hex_escape = r"""(x[0-9a-fA-F]+)"""
  bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""

  escape_sequence = \
      r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'

  # string literals (K&R2: A.2.6)
  string_char = r"""([^"\\\n]|"""+escape_sequence+')'
  string_literal = '"'+string_char+'*"'
  bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'

  # floating constants (K&R2: A.2.5.3)
  exponent_part = r"""([eE][-+]?[0-9]+)"""
  fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
  floating_constant = \
      '(((('+fractional_constant+')'+ \
      exponent_part+'?)|([0-9]+'+exponent_part+')))'

  # Ordinals
  ordinal = r'@[0-9]+'
  missing_ordinal_value = r'@'
  # Don't allow ordinal values in octal (even invalid octal, like 09) or
  # hexadecimal.
  octal_or_hex_ordinal_disallowed = r'@((0[0-9]+)|('+hex_prefix+hex_digits+'))'

  ##
  ## Rules for the normal state
  ##
  # Whitespace (other than newlines) is skipped entirely.
  t_ignore = ' \t\r'

  # Newlines
  def t_NEWLINE(self, t):
    r'\n+'
    t.lexer.lineno += len(t.value)

  # Operators
  t_MINUS             = r'-'
  t_PLUS              = r'\+'
  t_AMP               = r'&'
  t_QSTN              = r'\?'

  # =
  t_EQUALS            = r'='

  # =>
  t_RESPONSE          = r'=>'

  # Delimiters
  t_LPAREN            = r'\('
  t_RPAREN            = r'\)'
  t_LBRACKET          = r'\['
  t_RBRACKET          = r'\]'
  t_LBRACE            = r'\{'
  t_RBRACE            = r'\}'
  t_LANGLE            = r'<'
  t_RANGLE            = r'>'
  t_COMMA             = r','
  t_DOT               = r'\.'
  t_SEMI              = r';'

  t_STRING_LITERAL    = string_literal

  # The following floating and integer constants are defined as
  # functions to impose a strict order (otherwise, decimal
  # is placed before the others because its regex is longer,
  # and this is bad)
  #
  @TOKEN(floating_constant)
  def t_FLOAT_CONST(self, t):
    return t

  @TOKEN(hex_constant)
  def t_INT_CONST_HEX(self, t):
    return t

  @TOKEN(octal_constant_disallowed)
  def t_OCTAL_CONSTANT_DISALLOWED(self, t):
    msg = "Octal values not allowed"
    self._error(msg, t)

  @TOKEN(decimal_constant)
  def t_INT_CONST_DEC(self, t):
    return t

  # unmatched string literals are caught by the preprocessor
  @TOKEN(bad_string_literal)
  def t_BAD_STRING_LITERAL(self, t):
    msg = "String contains invalid escape code"
    self._error(msg, t)

  # Handle ordinal-related tokens in the right order:
  @TOKEN(octal_or_hex_ordinal_disallowed)
  def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t):
    msg = "Octal and hexadecimal ordinal values not allowed"
    self._error(msg, t)

  @TOKEN(ordinal)
  def t_ORDINAL(self, t):
    return t

  @TOKEN(missing_ordinal_value)
  def t_BAD_ORDINAL(self, t):
    msg = "Missing ordinal value"
    self._error(msg, t)

  @TOKEN(identifier)
  def t_NAME(self, t):
    # Reclassify identifiers that are reserved keywords.
    t.type = self.keyword_map.get(t.value, "NAME")
    return t

  # Ignore C and C++ style comments
  def t_COMMENT(self, t):
    r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
    t.lexer.lineno += t.value.count("\n")

  def t_error(self, t):
    msg = "Illegal character %s" % repr(t.value[0])
    self._error(msg, t)
|
|
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: December 15, 2003
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################
"""Here is defined the EArray class."""
import numpy
from tables.utils import convert_to_np_atom2, SizeType
from tables.carray import CArray
# default version for EARRAY objects
# obversion = "1.0" # initial version
# obversion = "1.1" # support for complex datatypes
# obversion = "1.2" # This adds support for time datatypes.
# obversion = "1.3" # This adds support for enumerated datatypes.
obversion = "1.4" # Numeric and numarray flavors are gone.
class EArray(CArray):
    """This class represents extendable, homogeneous datasets in an HDF5 file.

    The main difference between an EArray and a CArray (see
    :ref:`CArrayClassDescr`), from which it inherits, is that the former
    can be enlarged along one of its dimensions, the *enlargeable
    dimension*. That means that the :attr:`Leaf.extdim` attribute (see
    :class:`Leaf`) of any EArray instance will always be non-negative.
    Multiple enlargeable dimensions might be supported in the future.

    New rows can be added to the end of an enlargeable array by using the
    :meth:`EArray.append` method.

    Parameters
    ----------
    parentnode
        The parent :class:`Group` object.

        .. versionchanged:: 3.0
           Renamed from *parentNode* to *parentnode*.

    name : str
        The name of this node in its parent group.
    atom
        An `Atom` instance representing the *type* and *shape*
        of the atomic objects to be saved.
    shape
        The shape of the new array. One (and only one) of
        the shape dimensions *must* be 0. The dimension being 0
        means that the resulting `EArray` object can be extended
        along it. Multiple enlargeable dimensions are not supported
        right now.
    title
        A description for this node (it sets the ``TITLE``
        HDF5 attribute on disk).
    filters
        An instance of the `Filters` class that provides information
        about the desired I/O filters to be applied during the life
        of this object.
    expectedrows
        A user estimate about the number of row elements that will
        be added to the growable dimension in the `EArray` node.
        If not provided, the default value is ``EXPECTED_ROWS_EARRAY``
        (see ``tables/parameters.py``). If you plan to create either
        a much smaller or a much bigger `EArray` try providing a guess;
        this will optimize the HDF5 B-Tree creation and management
        process time and the amount of memory used.
    chunkshape
        The shape of the data chunk to be read or written in a single
        HDF5 I/O operation. Filters are applied to those chunks of data.
        The dimensionality of `chunkshape` must be the same as that of
        `shape` (beware: no dimension should be 0 this time!).
        If ``None``, a sensible value is calculated based on the
        `expectedrows` parameter (which is recommended).
    byteorder
        The byteorder of the data *on disk*, specified as 'little' or
        'big'. If this is not specified, the byteorder is that of the
        platform.

    Examples
    --------
    See below a small example of the use of the `EArray` class. The
    code is available in ``examples/earray1.py``::

        import tables
        import numpy

        fileh = tables.open_file('earray1.h5', mode='w')
        a = tables.StringAtom(itemsize=8)

        # Use ``a`` as the object type for the enlargeable array.
        array_c = fileh.create_earray(fileh.root, 'array_c', a, (0,),
                                      \"Chars\")
        array_c.append(numpy.array(['a'*2, 'b'*4], dtype='S8'))
        array_c.append(numpy.array(['a'*6, 'b'*8, 'c'*10], dtype='S8'))

        # Read the string ``EArray`` we have created on disk.
        for s in array_c:
            print('array_c[%s] => %r' % (array_c.nrow, s))
        # Close the file.
        fileh.close()

    The output for the previous script is something like::

        array_c[0] => 'aa'
        array_c[1] => 'bbbb'
        array_c[2] => 'aaaaaa'
        array_c[3] => 'bbbbbbbb'
        array_c[4] => 'cccccccc'

    """

    # Class identifier.
    _c_classid = 'EARRAY'

    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, parentnode, name,
                 atom=None, shape=None, title="",
                 filters=None, expectedrows=None,
                 chunkshape=None, byteorder=None,
                 _log=True):
        # Specific of EArray
        if expectedrows is None:
            # Fall back to the file-wide default estimate.
            expectedrows = parentnode._v_file.params['EXPECTED_ROWS_EARRAY']
        self._v_expectedrows = expectedrows
        """The expected number of rows to be stored in the array."""

        # Call the parent (CArray) init code
        super(EArray, self).__init__(parentnode, name, atom, shape, title,
                                     filters, chunkshape, byteorder, _log)

    # Public and private methods
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    def _g_create(self):
        """Create a new array in file (specific part)."""

        # Pre-conditions and extdim computation
        zerodims = numpy.sum(numpy.array(self.shape) == 0)
        if zerodims > 0:
            if zerodims == 1:
                # The single 0-length dimension is the enlargeable one.
                self.extdim = list(self.shape).index(0)
            else:
                raise NotImplementedError(
                    "Multiple enlargeable (0-)dimensions are not "
                    "supported.")
        else:
            raise ValueError(
                "When creating EArrays, you need to set one of "
                "the dimensions of the Atom instance to zero.")

        # Finish the common part of the creation process
        return self._g_create_common(self._v_expectedrows)

    def _check_shape_append(self, nparr):
        "Test that nparr shape is consistent with underlying EArray."

        # The arrays conforms self expandibility?
        myrank = len(self.shape)
        # Rank of the appended data, not counting the atom's own shape.
        narank = len(nparr.shape) - len(self.atom.shape)
        if myrank != narank:
            raise ValueError(("the ranks of the appended object (%d) and the "
                              "``%s`` EArray (%d) differ")
                             % (narank, self._v_pathname, myrank))
        # Every dimension except the enlargeable one must match exactly.
        for i in range(myrank):
            if i != self.extdim and self.shape[i] != nparr.shape[i]:
                raise ValueError(("the shapes of the appended object and the "
                                  "``%s`` EArray differ in non-enlargeable "
                                  "dimension %d") % (self._v_pathname, i))

    def append(self, sequence):
        """Add a sequence of data to the end of the dataset.

        The sequence must have the same type as the array; otherwise a
        TypeError is raised. In the same way, the dimensions of the
        sequence must conform to the shape of the array, that is, all
        dimensions must match, with the exception of the enlargeable
        dimension, which can be of any length (even 0!). If the shape
        of the sequence is invalid, a ValueError is raised.

        """

        self._g_check_open()
        self._v_file._check_writable()

        # Convert the sequence into a NumPy object
        nparr = convert_to_np_atom2(sequence, self.atom)
        # Check if it has a consistent shape with underlying EArray
        self._check_shape_append(nparr)
        # If the size of the nparr is zero, don't do anything else
        if nparr.size > 0:
            self._append(nparr)

    def _g_copy_with_stats(self, group, name, start, stop, step,
                           title, filters, chunkshape, _log, **kwargs):
        """Private part of Leaf.copy() for each kind of leaf."""

        (start, stop, step) = self._process_range_read(start, stop, step)
        # Build the new EArray object
        maindim = self.maindim
        shape = list(self.shape)
        # The copy starts empty along the enlargeable dimension and is
        # filled by appending below.
        shape[maindim] = 0
        # The number of final rows
        # NOTE: xrange -- this module is Python 2 code.
        nrows = len(xrange(0, stop - start, step))
        # Build the new EArray object
        # NOTE(review): `object` shadows the builtin; left as-is here.
        object = EArray(
            group, name, atom=self.atom, shape=shape, title=title,
            filters=filters, expectedrows=nrows, chunkshape=chunkshape,
            _log=_log)
        # Now, fill the new earray with values from source
        nrowsinbuf = self.nrowsinbuf
        # The slices parameter for self.__getitem__
        slices = [slice(0, dim, 1) for dim in self.shape]
        # This is a hack to prevent doing unnecessary conversions
        # when copying buffers
        self._v_convert = False
        # Start the copy itself
        for start2 in xrange(start, stop, step * nrowsinbuf):
            # Save the records on disk
            stop2 = start2 + step * nrowsinbuf
            if stop2 > stop:
                stop2 = stop
            # Set the proper slice in the extensible dimension
            slices[maindim] = slice(start2, stop2, step)
            object._append(self.__getitem__(tuple(slices)))
        # Active the conversion again (default)
        self._v_convert = True
        nbytes = numpy.prod(self.shape, dtype=SizeType) * self.atom.itemsize

        return (object, nbytes)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
|
'''
Video
=====
The :class:`Video` widget is used to display video files and streams.
Depending on your Video core provider, platform, and plugins, you will
be able to play different formats. For example, the pygame video
provider only supports MPEG1 on Linux and OSX. GStreamer is more
versatile, and can read many video containers and codecs such as MKV,
OGV, AVI, MOV, FLV (if the correct gstreamer plugins are installed). Our
:class:`~kivy.core.video.VideoBase` implementation is used under the
hood.
Video loading is asynchronous - many properties are not available until
the video is loaded (when the texture is created)::
def on_position_change(instance, value):
print('The position in the video is', value)
def on_duration_change(instance, value):
    print('The duration of the video is', value)
video = Video(source='PandaSneezes.avi')
video.bind(position=on_position_change,
duration=on_duration_change)
'''
__all__ = ('Video', )
from kivy.clock import Clock
from kivy.uix.image import Image
from kivy.core.video import Video as CoreVideo
from kivy.resources import resource_find
from kivy.properties import (BooleanProperty, NumericProperty, ObjectProperty,
OptionProperty)
class Video(Image):
    '''Video class. See module documentation for more information.
    '''

    state = OptionProperty('stop', options=('play', 'pause', 'stop'))
    '''String, indicates whether to play, pause, or stop the video::

        # start playing the video at creation
        video = Video(source='movie.mkv', state='play')

        # create the video, and start later
        video = Video(source='movie.mkv')
        # and later
        video.state = 'play'

    :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
    to 'stop'.
    '''

    play = BooleanProperty(False)
    '''
    .. deprecated:: 1.4.0
        Use :attr:`state` instead.

    Boolean, indicates whether the video is playing or not.
    You can start/stop the video by setting this property::

        # start playing the video at creation
        video = Video(source='movie.mkv', play=True)

        # create the video, and start later
        video = Video(source='movie.mkv')
        # and later
        video.play = True

    :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to
    False.

    .. deprecated:: 1.4.0
        Use :attr:`state` instead.
    '''

    eos = BooleanProperty(False)
    '''Boolean, indicates whether the video has finished playing or not
    (reached the end of the stream).

    :attr:`eos` is a :class:`~kivy.properties.BooleanProperty` and defaults to
    False.
    '''

    loaded = BooleanProperty(False)
    '''Boolean, indicates whether the video is loaded and ready for playback
    or not.

    .. versionadded:: 1.6.0

    :attr:`loaded` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''

    position = NumericProperty(-1)
    '''Position of the video between 0 and :attr:`duration`. The position
    defaults to -1 and is set to a real position when the video is loaded.

    :attr:`position` is a :class:`~kivy.properties.NumericProperty` and
    defaults to -1.
    '''

    duration = NumericProperty(-1)
    '''Duration of the video. The duration defaults to -1, and is set to a real
    duration when the video is loaded.

    :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
    defaults to -1.
    '''

    volume = NumericProperty(1.)
    '''Volume of the video, in the range 0-1. 1 means full volume, 0
    means mute.

    :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 1.
    '''

    options = ObjectProperty({})
    '''Options to pass at Video core object creation.

    .. versionadded:: 1.0.4

    :attr:`options` is an :class:`kivy.properties.ObjectProperty` and defaults
    to {}.
    '''

    def __init__(self, **kwargs):
        # The underlying core video provider; None until a source is loaded.
        self._video = None
        super(Video, self).__init__(**kwargs)
        # Any change of source schedules a (re)load of the core video.
        self.fast_bind('source', self._trigger_video_load)
        if "eos" in kwargs:
            # NOTE(review): this mutates the ObjectProperty's default dict
            # when `options` was not passed explicitly, so the "eos" entry
            # could leak across instances -- verify intended.
            self.options["eos"] = kwargs["eos"]
        if self.source:
            self._trigger_video_load()

    def seek(self, percent):
        '''Change the position to a percentage of duration. Percentage
        must be a value between 0-1.

        .. warning::
            Calling seek() before the video is loaded has no impact.

        .. versionadded:: 1.2.0
        '''
        if self._video is None:
            raise Exception('Video not loaded.')
        self._video.seek(percent)

    def _trigger_video_load(self, *largs):
        # Coalesce multiple source changes into a single load on the next
        # frame (-1 = before the next frame is drawn).
        Clock.unschedule(self._do_video_load)
        Clock.schedule_once(self._do_video_load, -1)

    def _do_video_load(self, *largs):
        # Build (or tear down) the core video object for the current source.
        if CoreVideo is None:
            return
        if self._video:
            self._video.stop()
        if not self.source:
            self._video = None
            self.texture = None
        else:
            filename = self.source
            # Check if filename is not url
            if not '://' in filename:
                filename = resource_find(filename)
            self._video = CoreVideo(filename=filename, **self.options)
            self._video.volume = self.volume
            self._video.bind(on_load=self._on_load,
                             on_frame=self._on_video_frame,
                             on_eos=self._on_eos)
            if self.state == 'play' or self.play:
                self._video.play()
            # Placeholder values until the first frame arrives.
            self.duration = 1.
            self.position = 0.

    def on_play(self, instance, value):
        # Deprecated `play` property: forward to the `state` handler.
        value = 'play' if value else 'stop'
        return self.on_state(instance, value)

    def on_state(self, instance, value):
        # React to state changes by driving the core video object.
        if not self._video:
            return
        if value == 'play':
            if self.eos:
                # Restart from the beginning after end-of-stream.
                self._video.stop()
                self._video.position = 0.
                self._video.eos = False
            self.eos = False
            self._video.play()
        elif value == 'pause':
            self._video.pause()
        else:
            self._video.stop()
            self._video.position = 0
            self._video.eos = False

    def _on_video_frame(self, *largs):
        # Mirror the core video's current frame/progress onto the widget.
        video = self._video
        if not video:
            return
        self.duration = video.duration
        self.position = video.position
        self.texture = video.texture
        self.canvas.ask_update()

    def _on_eos(self, *largs):
        # 'loop' means the provider restarts by itself; otherwise stop.
        if self._video.eos != 'loop':
            self.state = 'stop'
            self.eos = True

    def _on_load(self, *largs):
        self.loaded = True
        # NOTE(review): passes the largs tuple as a single positional
        # argument; harmless since _on_video_frame ignores its args.
        self._on_video_frame(largs)

    def on_volume(self, instance, value):
        # Keep the core video's volume in sync with the property.
        if self._video:
            self._video.volume = value

    def unload(self):
        '''Unload the video. The playback will be stopped.

        .. versionadded:: 1.8.0
        '''
        if self._video:
            self._video.stop()
            self._video.unload()
            self._video = None
if __name__ == '__main__':
    from kivy.app import App
    import sys

    # Minimal manual test: play the file given on the command line and
    # loop it by restarting playback whenever it reaches 'stop'.
    if len(sys.argv) != 2:
        print("usage: %s file" % sys.argv[0])
        sys.exit(1)

    class VideoApp(App):
        def build(self):
            self.v = Video(source=sys.argv[1], state='play')
            self.v.bind(state=self.replay)
            return self.v

        def replay(self, *args):
            # Restart as soon as playback stops (end of stream).
            if self.v.state == 'stop':
                self.v.state = 'play'

    VideoApp().run()
|
|
import collections
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import compat
from . import protocols
from . import transports
from .log import logger
def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
if hasattr(ssl, 'create_default_context'):
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
return sslcontext
def _is_sslproto_available():
return hasattr(ssl, "MemoryBIO")
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"        # plaintext pass-through; no SSL in effect
_DO_HANDSHAKE = "DO_HANDSHAKE"  # TLS handshake in progress
_WRAPPED = "WRAPPED"            # security layer established; app data flows
_SHUTDOWN = "SHUTDOWN"          # close_notify sent, awaiting acknowledgement
class _SSLPipe(object):
    """An SSL "Pipe".
    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.
    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.
    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """
    max_size = 256 * 1024  # Buffer size passed to read()
    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.
        The *server_side* argument indicates whether this is a server side or
        client side transport.
        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        # One of _UNWRAPPED, _DO_HANDSHAKE, _WRAPPED, _SHUTDOWN.
        self._state = _UNWRAPPED
        # Memory BIOs carrying record-level bytes to/from the SSLObject.
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        # The ssl.SSLObject; created lazily by do_handshake().
        self._sslobj = None
        # True while the SSL layer is blocked waiting for more record data.
        self._need_ssldata = False
        # Completion callbacks installed by do_handshake() / shutdown().
        self._handshake_cb = None
        self._shutdown_cb = None
    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context
    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.
        Return None if the pipe is not wrapped.
        """
        return self._sslobj
    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata
    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.
        Return False during handshake.
        """
        return self._state == _WRAPPED
    def do_handshake(self, callback=None):
        """Start the SSL handshake.
        Return a list of ssldata. A ssldata element is a list of buffers
        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        # Kick the state machine once; no application data can exist yet.
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata
    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.
        Return a list of ssldata. A ssldata element is a list of buffers
        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        ssldata, appdata = self.feed_ssldata(b'')
        # At most an empty close_notify marker may come back here.
        assert appdata == [] or appdata == [b'']
        return ssldata
    def feed_eof(self):
        """Send a potentially "ragged" EOF.
        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']
    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.
        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.
        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.
        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)
        self._need_ssldata = False
        if data:
            self._incoming.write(data)
        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block
            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break
            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()
            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            # WANT_READ/WANT_WRITE/SYSCALL mean "retry when more data is
            # available"; anything else is a real failure.
            if getattr(exc, 'errno', None) not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)
    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.
        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.
        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))
        ssldata = []
        # memoryview avoids copying when slicing for retries.
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)
class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):
    # Transport handed to the application protocol.  It delegates almost
    # everything to the SSLProtocol (and through it to the real low-level
    # transport), so application writes are encrypted transparently.
    def __init__(self, loop, ssl_protocol, app_protocol):
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        self._app_protocol = app_protocol
        self._closed = False
    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)
    def close(self):
        """Close the transport.
        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) called
        with None as its argument.
        """
        self._closed = True
        # Graceful close: start the SSL shutdown (close_notify) sequence.
        self._ssl_protocol._start_shutdown()
    # On Python 3.3 and older, objects with a destructor part of a reference
    # cycle are never destroyed. It's not more the case on Python 3.4 thanks
    # to the PEP 442.
    if compat.PY34:
        def __del__(self):
            # Warn about transports that were garbage-collected while open.
            if not self._closed:
                warnings.warn("unclosed transport %r" % self, ResourceWarning)
                self.close()
    def pause_reading(self):
        """Pause the receiving end.
        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._transport.pause_reading()
    def resume_reading(self):
        """Resume the receiving end.
        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._transport.resume_reading()
    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.
        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.
        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to a
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._transport.set_write_buffer_limits(high, low)
    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        return self._ssl_protocol._transport.get_write_buffer_size()
    def write(self, data):
        """Write some data bytes to the transport.
        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError("data: expecting a bytes-like instance, got {!r}"
                            .format(type(data).__name__))
        if not data:
            return
        # Plaintext is queued on the SSLProtocol, encrypted, then sent.
        self._ssl_protocol._write_appdata(data)
    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        # SSL has no half-close: close_notify terminates both directions.
        return False
    def abort(self):
        """Close the transport immediately.
        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._ssl_protocol._abort()
class SSLProtocol(protocols.Protocol):
    """SSL protocol.
    Implementation of SSL on top of a socket using incoming and outgoing
    buffers which are ssl.MemoryBIO objects.
    """
    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None):
        # *waiter* is a future resolved when the handshake completes (or
        # fails); *app_protocol* is the user's protocol, which will see an
        # _SSLProtocolTransport instead of the raw socket transport.
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')
        if not sslcontext:
            sslcontext = _create_transport_context(server_side, server_hostname)
        self._server_side = server_side
        # server_hostname is only meaningful for client connections.
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info are set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)
        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0
        self._waiter = waiter
        self._loop = loop
        self._app_protocol = app_protocol
        self._app_transport = _SSLProtocolTransport(self._loop,
                                                    self, self._app_protocol)
        # _SSLPipe instance (None until the connection is made)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
    def _wakeup_waiter(self, exc=None):
        # Resolve the handshake waiter exactly once; subsequent calls no-op.
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None
    def connection_made(self, transport):
        """Called when the low-level connection is made.
        Start the SSL handshake.
        """
        self._transport = transport
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()
    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.
        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        # Only forward connection_lost if connection_made was delivered to
        # the app protocol (i.e. the handshake completed).
        if self._session_established:
            self._session_established = False
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        self._transport = None
        self._app_transport = None
    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        self._app_protocol.pause_writing()
    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        self._app_protocol.resume_writing()
    def data_received(self, data):
        """Called when some SSL data is received.
        The argument is a bytes object.
        """
        try:
            ssldata, appdata = self._sslpipe.feed_ssldata(data)
        except ssl.SSLError as e:
            if self._loop.get_debug():
                logger.warning('%r: SSL error %s (reason %s)',
                               self, e.errno, e.reason)
            self._abort()
            return
        # Record-level replies (handshake, renegotiation) go to the wire.
        for chunk in ssldata:
            self._transport.write(chunk)
        # Plaintext goes to the application; an empty chunk signals
        # close_notify and triggers the shutdown sequence.
        for chunk in appdata:
            if chunk:
                self._app_protocol.data_received(chunk)
            else:
                self._start_shutdown()
                break
    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.
        If this returns a false value (including None), the transport
        will close itself. If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)
            # EOF before the handshake finished means the peer reset us.
            self._wakeup_waiter(ConnectionResetError)
            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        finally:
            self._transport.close()
    def _get_extra_info(self, name, default=None):
        # SSL-level extras first, then fall through to the real transport.
        if name in self._extra:
            return self._extra[name]
        else:
            return self._transport.get_extra_info(name, default)
    def _start_shutdown(self):
        if self._in_shutdown:
            return
        self._in_shutdown = True
        # (b'', 0) in the backlog is the shutdown marker; see
        # _process_write_backlog().
        self._write_appdata(b'')
    def _write_appdata(self, data):
        self._write_backlog.append((data, 0))
        self._write_buffer_size += len(data)
        self._process_write_backlog()
    def _start_handshake(self):
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None
        self._in_handshake = True
        # (b'', 1) is a special value in _process_write_backlog() to do
        # the SSL handshake
        self._write_backlog.append((b'', 1))
        self._loop.call_soon(self._process_write_backlog)
    def _on_handshake_complete(self, handshake_exc):
        self._in_handshake = False
        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc
            peercert = sslobj.getpeercert()
            if not hasattr(self._sslcontext, 'check_hostname'):
                # Verify hostname if requested, Python 3.4+ uses check_hostname
                # and checks the hostname in do_handshake()
                if (self._server_hostname
                        and self._sslcontext.verify_mode != ssl.CERT_NONE):
                    ssl.match_hostname(peercert, self._server_hostname)
        except BaseException as exc:
            if self._loop.get_debug():
                if isinstance(exc, ssl.CertificateError):
                    logger.warning("%r: SSL handshake failed "
                                   "on verifying the certificate",
                                   self, exc_info=True)
                else:
                    logger.warning("%r: SSL handshake failed",
                                   self, exc_info=True)
            self._transport.close()
            if isinstance(exc, Exception):
                self._wakeup_waiter(exc)
                return
            else:
                # BaseException (e.g. KeyboardInterrupt) must propagate.
                raise
        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           ssl_object=sslobj,
                           )
        self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # immediatly _process_write_backlog(), but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)
    def _process_write_backlog(self):
        # Try to make progress on the write backlog.
        # Backlog entries are (data, offset) pairs; (b'', 1) requests the
        # handshake and (b'', 0) requests the shutdown sequence.
        if self._transport is None:
            return
        try:
            for i in range(len(self._write_backlog)):
                data, offset = self._write_backlog[0]
                if data:
                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
                elif offset:
                    ssldata = self._sslpipe.do_handshake(
                        self._on_handshake_complete)
                    offset = 1
                else:
                    ssldata = self._sslpipe.shutdown(self._finalize)
                    offset = 1
                for chunk in ssldata:
                    self._transport.write(chunk)
                if offset < len(data):
                    self._write_backlog[0] = (data, offset)
                    # A short write means that a write is blocked on a read
                    # We need to enable reading if it is paused!
                    assert self._sslpipe.need_ssldata
                    if self._transport._paused:
                        self._transport.resume_reading()
                    break
                # An entire chunk from the backlog was processed. We can
                # delete it and reduce the outstanding buffer size.
                del self._write_backlog[0]
                self._write_buffer_size -= len(data)
        except BaseException as exc:
            if self._in_handshake:
                # BaseExceptions will be re-raised in _on_handshake_complete.
                self._on_handshake_complete(exc)
            else:
                self._fatal_error(exc, 'Fatal error on SSL transport')
            if not isinstance(exc, Exception):
                # BaseException
                raise
    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Should be called from exception handler only.
        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
        if self._transport:
            self._transport._force_close(exc)
    def _finalize(self):
        # Shutdown callback: the close_notify exchange is done, close the
        # underlying transport.
        if self._transport is not None:
            self._transport.close()
    def _abort(self):
        if self._transport is not None:
            try:
                self._transport.abort()
            finally:
                self._finalize()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field badges on 'UserProfile'
m2m_table_name = db.shorten_name(u'storybase_user_userprofile_badges')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userprofile', models.ForeignKey(orm[u'storybase_user.userprofile'], null=False)),
('badge', models.ForeignKey(orm[u'storybase_badge.badge'], null=False))
))
db.create_unique(m2m_table_name, ['userprofile_id', 'badge_id'])
def backwards(self, orm):
# Removing M2M table for field badges on 'UserProfile'
db.delete_table(db.shorten_name(u'storybase_user_userprofile_badges'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': u"orm['storybase_asset.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': u"orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': u"orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
u'storybase_badge.badge': {
'Meta': {'object_name': 'Badge'},
'description': ('django.db.models.fields.TextField', [], {}),
'icon_uri': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['storybase_geo.GeoLevel']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'storybase_geo.location': {
'Meta': {'object_name': 'Location'},
'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['auth.User']"}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'storybase_geo.place': {
'Meta': {'object_name': 'Place'},
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'_parents'", 'to': u"orm['storybase_geo.Place']", 'through': u"orm['storybase_geo.PlaceRelation']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': u"orm['storybase_geo.GeoLevel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'place_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
u'storybase_geo.placerelation': {
'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': u"orm['storybase_geo.Place']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': u"orm['storybase_geo.Place']"})
},
u'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'allow_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': u"orm['auth.User']"}),
'badges': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'stories'", 'symmetrical': 'False', 'to': u"orm['storybase_badge.Badge']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_asset.DataSet']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': u"orm['storybase_asset.Asset']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_geo.Location']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_user.Organization']"}),
'places': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_geo.Place']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_to'", 'blank': 'True', 'through': u"orm['storybase_story.StoryRelation']", 'to': u"orm['storybase_story.Story']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'template_story': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': u"orm['storybase_story.Story']"}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': u"orm['storybase_taxonomy.Category']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'storybase_story.storyrelation': {
'Meta': {'object_name': 'StoryRelation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'relation_type': ('django.db.models.fields.CharField', [], {'default': "'connected'", 'max_length': '25'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': u"orm['storybase_story.Story']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source'", 'to': u"orm['storybase_story.Story']"})
},
u'storybase_taxonomy.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['storybase_taxonomy.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'storybase_taxonomy.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'tag_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'storybase_taxonomy.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'storybase_taxonomy_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['storybase_taxonomy.Tag']"})
},
u'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': u"orm['storybase_user.OrganizationStory']", 'to': u"orm['storybase_story.Story']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_organizations'", 'blank': 'True', 'to': u"orm['storybase_asset.Asset']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'through': u"orm['storybase_user.OrganizationMembership']", 'to': u"orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'storybase_user.organizationmembership': {
'Meta': {'object_name': 'OrganizationMembership'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_type': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '140'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_user.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'storybase_user.organizationtranslation': {
'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_user.Organization']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': u"orm['storybase_user.ProjectStory']", 'to': u"orm['storybase_story.Story']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_projects'", 'blank': 'True', 'to': u"orm['storybase_asset.Asset']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'through': u"orm['storybase_user.ProjectMembership']", 'to': u"orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': u"orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'storybase_user.projectmembership': {
'Meta': {'object_name': 'ProjectMembership'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_type': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '140'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_user.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'storybase_user.projecttranslation': {
'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['storybase_user.Project']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'storybase_user.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'badges': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users'", 'symmetrical': 'False', 'to': u"orm['storybase_badge.Badge']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notify_admin': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_digest': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_story_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_story_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_story_unpublished': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'profile_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['storybase_user']
|
|
"""
corpkit: process CONLL formatted data
"""
def parse_conll(f,
                first_time=False,
                just_meta=False,
                usecols=None):
    """
    Make a pandas.DataFrame with metadata from a CONLL-U file

    Token lines are collected into a tab-separated buffer with the sentence
    number prepended as an extra first column, then parsed in one go by
    pandas.read_csv with a (sentence, token) MultiIndex. Comment lines
    (starting with '#') of the form ``key=value`` are accumulated per
    sentence into a metadata dict.

    Args:
        f (str): Filepath
        first_time (bool, optional): If True, add in sent index
        just_meta (bool, optional): Return only a metadata `dict`
        usecols (None, optional): Which columns must be parsed by pandas.read_csv
    Returns:
        pandas.DataFrame: DataFrame containing tokens and a ._metadata attribute,
        or a dict when just_meta is True, or None for empty/unparsable files
    """
    import pandas as pd
    # Python 2/3 compatible StringIO import
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    from collections import defaultdict
    # go to corpkit.constants to modify the order of columns if yours are different
    from corpkit.constants import CONLL_COLUMNS as head
    with open(f, 'r') as fo:
        data = fo.read().strip('\n')
    splitdata = []
    metadata = {}
    # CONLL sentences are separated by blank lines
    sents = data.split('\n\n')
    for count, sent in enumerate(sents, start=1):
        metadata[count] = defaultdict(set)
        for line in sent.split('\n'):
            if line and not line.startswith('#') \
                and not just_meta:
                # prefix each token line with its 1-based sentence number
                splitdata.append('\n%d\t%s' % (count, line))
            else:
                # '# key=value' comment lines become per-sentence metadata;
                # repeated keys are collected into a set, joined below
                line = line.lstrip('# ')
                if '=' in line:
                    field, val = line.split('=', 1)
                    metadata[count][field].add(val)
        metadata[count] = {k: ','.join(v) for k, v in metadata[count].items()}
    if just_meta:
        return metadata
    # happens with empty files
    if not splitdata:
        return
    # head can only be as long as the list of cols in the df
    num_tabs = splitdata[0].strip('\t').count('\t')
    head = head[:num_tabs]
    # introduce sentence index for multiindex
    #for i, d in enumerate(splitdata, start=1):
    #    d = d.replace('\n', '\n%s\t' % str(i))
    #    splitdata[i-1] = d
    # turn into something pandas can read
    data = '\n'.join(splitdata)
    data = data.replace('\n\n', '\n') + '\n'
    # remove slashes as early as possible
    data = data.replace('/', '-slash-')
    # open with sent and token as multiindex
    try:
        df = pd.read_csv(StringIO(data), sep='\t', header=None,
                         names=['s'] + head, index_col=['s', 'i'], usecols=usecols)
        #df.index = pd.MultiIndex.from_tuples([(1, i) for i in df.index])
    except ValueError:
        return
    # stash metadata on the frame (corpkit convention used throughout)
    df._metadata = metadata
    return df
def get_dependents_of_id(idx, df=False, repeat=False, attr=False, coref=False):
    """
    Get dependents of a token

    :param idx: (sent_id, tok_id) tuple, or an object with such a ``.name``
    :param df: token DataFrame with a (sent, token) MultiIndex and a 'd'
               column holding comma-separated dependent token ids
    :param attr: if given, return that column's value for each dependent
                 instead of its index tuple
    :returns: list of (sent_id, tok_id) tuples, or of attribute values
    """
    sent_id, tok_id = getattr(idx, 'name', idx)
    # bug fix: .ix was removed in pandas 1.0; these are label lookups,
    # so .loc is the correct replacement
    deps = df.loc[(sent_id, tok_id)]['d'].split(',')
    out = []
    for depid in deps:
        if attr:
            # the dependent row might not exist (e.g. id 0 / punctuation)
            try:
                tok = getattr(df.loc[(sent_id, int(depid))], attr, False)
                if tok:
                    out.append(tok)
            except (KeyError, IndexError):
                pass
        else:
            out.append((sent_id, int(depid)))
    return out
def get_governors_of_id(idx, df=False, repeat=False, attr=False, coref=False):
    """
    Get governors of a token

    :param idx: (sent_id, tok_id) tuple, or an object with such a ``.name``
    :param df: token DataFrame with a (sent, token) MultiIndex and a 'g'
               column holding the governor's token id
    :param attr: if given, return the governor's value for that column
                 ('root' if absent) instead of the index tuple
    """
    # idx may be a Series (use its .name) or a plain tuple
    sent_id, tok_id = getattr(idx, 'name', idx)
    # look up this token's governor id
    govid = df['g'].loc[(sent_id, tok_id)]
    if not attr:
        return [(sent_id, govid)]
    return getattr(df.loc[(sent_id, govid)], attr, 'root')
def get_match(idx, df=False, repeat=False, attr=False, **kwargs):
    """
    Dummy function, for the most part: return the match's own index,
    or, when *attr* is given, the match's value for that column.

    :param idx: (sent_id, tok_id) tuple, or an object with such a ``.name``
    :param df: token DataFrame with a (sent, token) MultiIndex
    """
    sent_id, tok_id = getattr(idx, 'name', idx)
    if attr:
        # bug fix: .ix was removed in pandas 1.0; label lookup -> .loc
        return df[attr].loc[(sent_id, tok_id)]
    return [(sent_id, tok_id)]
def get_head(idx, df=False, repeat=False, attr=False, **kwargs):
    """
    Get the head of a 'constituent':
    for 'corpus linguistics', if 'corpus' is searched, return 'linguistics'

    The head of a coref mention is the row in the same sentence whose 'c'
    value is this token's 'c' value plus a trailing '*'.

    :param idx: (sent_id, tok_id) tuple, or an object with such a ``.name``
    :param df: token DataFrame with a (sent, token) MultiIndex
    :param attr: if given, return that column's values instead of indexes
    :returns: list of (sent_id, tok_id) tuples, or of attribute values
    """
    sent_id, tok_id = getattr(idx, 'name', idx)
    # bug fix: .ix was removed in pandas 1.0; label lookup -> .loc
    token = df.loc[(sent_id, tok_id)]
    if not hasattr(token, 'c'):
        # no coref data at all: fall back to the token itself
        lst_of_ixs = [(sent_id, tok_id)]
    elif token['c'] == '_':
        # token is not in any coref chain
        lst_of_ixs = [(sent_id, tok_id)]
    # if it is the head, return it
    elif token['c'].endswith('*'):
        lst_of_ixs = [(sent_id, tok_id)]
    else:
        # should be able to speed this one up!
        just_same_coref = df.loc[sent_id][df.loc[sent_id]['c'] == token['c'] + '*']
        if not just_same_coref.empty:
            lst_of_ixs = [(sent_id, i) for i in just_same_coref.index]
        else:
            lst_of_ixs = [(sent_id, tok_id)]
    if attr:
        lst_of_ixs = [df.loc[i][attr] for i in lst_of_ixs]
    return lst_of_ixs
def get_representative(idx,
                       df=False,
                       repeat=False,
                       attr=False,
                       **kwargs):
    """
    Get the representative coref head: the first row anywhere in the corpus
    whose 'c' value is this token's 'c' value plus a trailing '*'.

    :param idx: (sent_id, tok_id) tuple, or an object with such a ``.name``
    :param df: token DataFrame with a (sent, token) MultiIndex
    :param attr: if given, return that column's values instead of indexes
    :returns: list with one (sent_id, tok_id) tuple, or one attribute value
    """
    sent_id, tok_id = getattr(idx, 'name', idx)
    # bug fix: .ix was removed in pandas 1.0; label lookup -> .loc
    token = df.loc[(sent_id, tok_id)]
    # if no corefs at all
    if not hasattr(token, 'c'):
        # this should error, because the data isn't there at all
        lst_of_ixs = [(sent_id, tok_id)]
    # if no coref available
    elif token['c'] == '_':
        lst_of_ixs = [(sent_id, tok_id)]
    else:
        # NOTE(review): for a token that is already a head ('...*'), this
        # searches for '...**', finds nothing, and returns the token itself
        # -- presumably intended; confirm
        just_same_coref = df.loc[df['c'] == token['c'] + '*']
        if not just_same_coref.empty:
            lst_of_ixs = [just_same_coref.iloc[0].name]
        else:
            lst_of_ixs = [(sent_id, tok_id)]
    if attr:
        lst_of_ixs = [df.loc[i][attr] for i in lst_of_ixs]
    return lst_of_ixs
def get_all_corefs(s, i, df, coref=False):
    """
    Return every mention-head index sharing token (s, i)'s coref chain.

    :param s: sentence id
    :param i: token id
    :param df: token DataFrame with a (sent, token) MultiIndex
    :param coref: if falsy, skip coref logic and return the token itself
    :returns: list of (sent_id, tok_id) tuples
    """
    # if not in coref mode, skip
    if not coref:
        return [(s, i)]
    # if the word was not a head, forget it
    # bug fix: .ix was removed in pandas 1.0; label lookup -> .loc
    if not df.loc[(s, i)]['c'].endswith('*'):
        return [(s, i)]
    try:
        # get any other mention head for this coref chain
        just_same_coref = df.loc[df['c'] == df.loc[(s, i)]['c']]
        return list(just_same_coref.index)
    except Exception:
        # was a bare except; keep the best-effort fallback but stop
        # swallowing SystemExit/KeyboardInterrupt
        return [(s, i)]
def search_this(df, obj, attrib, pattern, adjacent=False, coref=False):
    """
    Search the dataframe for a single criterion

    :param df: token DataFrame with a (sent, token) MultiIndex
    :param obj: the object to return ('m' match, 'g'/'d' gov/dep,
                'h' coref heads, 'r' representative)
    :param attrib: column to match *pattern* against
    :param pattern: compiled regex (or regex-like) for str.contains
    :param adjacent: optional ('+'/'-', distance) tuple shifting each hit
    :returns: deduplicated list of (sent_id, tok_id) tuples
    """
    import re
    out = []
    # if searching by head, they need to be heads
    # bug fix: Series has no .endswith; the string accessor is required
    if obj == 'h':
        df = df.loc[df['c'].str.endswith('*')]
    # cut down to just tokens with matching attr
    # but, if the pattern is 'any', don't bother
    if hasattr(pattern, 'pattern') and pattern.pattern == r'.*':
        matches = df
    else:
        matches = df[df[attrib].fillna('').str.contains(pattern)]
    # functions for getting the needed object
    # NOTE(review): 'g' maps to dependents and 'd' to governors -- the
    # inversion looks deliberate (searching the governor yields its
    # dependents), but confirm against callers
    revmapping = {'g': get_dependents_of_id,
                  'd': get_governors_of_id,
                  'm': get_match,
                  'h': get_all_corefs,
                  'r': get_representative}
    getfunc = revmapping.get(obj)
    for idx in list(matches.index):
        if adjacent:
            # bug fix: the original referenced an undefined name 'adj'
            # here, raising NameError whenever adjacent mode was used
            if adjacent[0] == '+':
                tomove = -int(adjacent[1])
            elif adjacent[0] == '-':
                tomove = int(adjacent[1])
            idx = (idx[0], idx[1] + tomove)
        for mindex in getfunc(idx, df=df, coref=coref):
            if mindex:
                out.append(mindex)
    return list(set(out))
def show_fix(show):
    """
    Expand each show value into an (adj, val, obj, attr, getter) tuple.

    :param show: list of show codes, e.g. ['gf', '+1mw']
    :returns: list of 5-tuples, one per show value
    """
    objmapping = {'d': get_dependents_of_id,
                  'g': get_governors_of_id,
                  'm': get_match,
                  'h': get_head}
    out = []
    for val in show:
        adj, val = determine_adjacent(val)
        obj, attr = val[0], val[-1]
        obj_getter = objmapping.get(obj)
        # bug fix: list.append takes exactly one argument; the original
        # passed five, raising TypeError on every call
        out.append((adj, val, obj, attr, obj_getter))
    return out
def dummy(x, *args, **kwargs):
    """Identity helper: ignore any extra arguments and return *x* as-is."""
    return x
def format_toks(to_process, show, df):
    """
    Format matches by show values

    :param to_process: DataFrame of matched tokens (subset of *df*)
    :param show: list of show codes, e.g. ['mw', 'gf', '+1mp']
    :param df: full token DataFrame with a (sent, token) MultiIndex
    :returns: a pandas.Series of formatted strings; when any show value is
              dependent-based ('d...'), the Series may contain several rows
              per original match (one per dependent)
    """
    import pandas as pd
    objmapping = {'d': get_dependents_of_id,
                  'g': get_governors_of_id,
                  'm': get_match,
                  'h': get_head}
    sers = []
    # dependent mode: one match can expand into several results
    dmode = any(x.startswith('d') for x in show)
    if dmode:
        from collections import defaultdict
        dicts = defaultdict(dict)
    for val in show:
        # split off any '+n'/'-n' adjacency prefix
        adj, val = determine_adjacent(val)
        if adj:
            if adj[0] == '+':
                tomove = int(adj[1])
            elif adj[0] == '-':
                tomove = -int(adj[1])
        obj, attr = val[0], val[-1]
        func = objmapping.get(obj, dummy)
        out = defaultdict(dict) if dmode else []
        for ix in list(to_process.index):
            piece = False
            if adj:
                # shift to the adjacent token; 'none' if it falls outside
                ix = (ix[0], ix[1] + tomove)
                if ix not in df.index:
                    piece = 'none'
            if not piece:
                if obj == 'm':
                    # 'x' (wordclass) is derived from the POS ('p') column
                    piece = df.loc[ix][attr.replace('x', 'p')]
                    if attr == 'x':
                        from corpkit.dictionaries.word_transforms import taglemma
                        piece = taglemma.get(piece.lower(), piece.lower())
                    piece = [piece]
                else:
                    piece = func(ix, df=df, attr=attr)
            if not isinstance(piece, list):
                piece = [piece]
            if dmode:
                dicts[ix][val] = piece
            else:
                out.append(piece[0])
        if not dmode:
            ser = pd.Series(out, index=to_process.index)
            ser.name = val
            sers.append(ser)
    if not dmode:
        # one column per show value; join columns with '/' when several
        dx = pd.concat(sers, axis=1)
        if len(dx.columns) == 1:
            return dx.iloc[:,0]
        else:
            return dx.apply('/'.join, axis=1)
    else:
        # dependent mode: pad single-valued show results to the length of
        # the longest one, then zip them into '/'-joined rows
        index = []
        data = []
        for ix, dct in dicts.items():
            max_key, max_value = max(dct.items(), key=lambda x: len(x[1]))
            for val, pieces in dct.items():
                if len(pieces) == 1:
                    dicts[ix][val] = pieces * len(max_value)
            for tup in list(zip(*[i for i in dct.values()])):
                index.append(ix)
                data.append('/'.join(tup))
        return pd.Series(data, index=pd.MultiIndex.from_tuples(index))
def make_series(ser, df=False, obj=False,
                att=False, adj=False):
    """
    To apply to a DataFrame to add complex criteria, like 'gf'

    :param ser: a token row (Series) from a DataFrame.apply(axis=1) call;
                ``ser.name`` is its (sent, token) index tuple
    :param df: the supporting frame (may be a column-subset of the corpus)
    :param obj: object code -- 'g' governor, 'd' dependent, 'h' coref head,
                'r' representative mention, 's' whole phrase
    :param att: attribute code; 'a' means distance-to-root mode
    :returns: a scalar value for most objects; a Series/array for 'd'/'s'
    """
    # distance mode
    if att == 'a':
        count = 0
        if obj == 'g':
            if ser[obj] == 0:
                return '-1'
            ser = df.loc[ser.name[0], ser['g']]
        # walk up the governor chain, counting steps until 'root'
        while count < 20:
            if ser['mf'].lower() == 'root':
                return str(count)
            ser = df.loc[ser.name[0], ser['g']]
            count += 1
        return '20+'
    # h is head of this particular group
    if obj == 'h':
        cohead = ser['c']
        if cohead.endswith('*'):
            return ser['m' + att]
        elif cohead == '_':
            return 'none'
        else:
            # search the same sentence for the marked head of this chain
            sent = df.loc[ser.name[0]]
            just_cof = sent[sent['c'] == cohead + '*']
            if just_cof.empty:
                return ser['m' + att]
            else:
                return just_cof.iloc[0]['m' + att]
    # r is the representative mention head
    if obj == 'r':
        cohead = ser['c']
        if cohead == '_':
            return 'none'
        if not cohead.endswith('*'):
            cohead = cohead + '*'
        # iterrows is slow, but we only need the first instance
        just_cof = df[df['c'] == cohead]
        if just_cof.empty:
            return ser['m' + att]
        else:
            return just_cof.iloc[0]['m' + att]
    if obj == 'g':
        if ser[obj] == 0:
            return 'root'
        else:
            try:
                return df[att][ser.name[0], ser[obj]]
            # this keyerror can happen if governor is punctuation, for example
            except KeyError:
                return
    # if dependent, we need to return a df-like thing instead
    elif obj == 'd':
        #import pandas as pd
        # NOTE(review): .ix was removed in pandas 1.0; this branch would
        # need .loc on a modern pandas -- confirm before relying on it
        idxs = [(ser.name[0], int(i)) for i in ser[obj].split(',')]
        dat = df[att].ix[idxs]
        return dat
    # todo: fix everything below here
    # NOTE(review): the 'r' and 'h' branches below are unreachable -- the
    # earlier `if obj == 'r'` / `if obj == 'h'` blocks always return first
    elif obj == 'r': # get the representative
        cohead = ser['c'].rstrip('*')
        refs = df[df['c'] == cohead + '*']
        return refs[att].ix[0]
    elif obj == 'h': # get head
        cohead = ser['c']
        if cohead.endswith('*'):
            return ser[att]
        else:
            sent = df[att].loc[ser.name[0]]
            return sent[sent['c'] == cohead + '*']
    # potential naming conflict with sent index ...
    elif obj == 's': # get whole phrase"
        cohead = ser['c']
        sent = df[att].loc[ser.name[0]]
        return sent[sent['c'] == cohead.rstrip('*')].values
def joiner(ser):
    """Collapse a Series of strings into a single '/'-separated string."""
    separator = '/'
    return ser.str.cat(sep=separator)
def make_new_for_dep(dfmain, dfdep, name):
    """
    If showing dependents, build a whole new frame: each main row is
    duplicated once per non-null dependent value, and a row with no
    dependent data gets a single 'none' entry.

    :param dfmain: dataframe with everything in it
    :param dfdep: dataframe with just dependent values (same index)
    :param name: column name to hold the dependent value in the result
    """
    import pandas as pd
    import numpy as np
    rows = []
    dep_vals = []
    idx_tuples = []
    for (ix, main_row), (_, dep_row) in zip(dfmain.iterrows(), dfdep.iterrows()):
        # a dependent row that is entirely null contributes one 'none' row
        if all(pd.isnull(cell) for cell in dep_row.values):
            idx_tuples.append(ix)
            rows.append(main_row)
            dep_vals.append('none')
            continue
        # otherwise, duplicate the main row once per non-null dependent
        for cell in dep_row:
            if pd.isnull(cell):
                continue
            idx_tuples.append(ix)
            rows.append(main_row)
            dep_vals.append(cell)
    #todo: account for no matches
    mix = pd.MultiIndex.from_tuples(idx_tuples, names=['s', 'i'])
    newdf = pd.DataFrame(rows, index=mix)
    newdf[name] = dep_vals
    return newdf
def turn_pos_to_wc(ser, showval):
    """
    Translate a Series of POS tags into word classes when *showval* is
    truthy; otherwise hand the Series back untouched. The returned Series
    is renamed so its trailing 'p' becomes 'x'.
    """
    if not showval:
        return ser
    import pandas as pd
    from corpkit.dictionaries.word_transforms import taglemma
    translated = [taglemma.get(tag.lower(), tag.lower()) for tag in ser.values]
    wc_ser = pd.Series(translated, index=ser.index)
    wc_ser.name = ser.name[:-1] + 'x'
    return wc_ser
def concline_generator(matches, idxs, df, metadata,
                       add_meta, category, fname, preserve_case=False):
    """
    Get all conclines

    Builds one concordance line per match: [index-string, category,
    filename, speaker, left context, match, right context], optionally
    extended with sentence metadata values.

    :param matches: a list of formatted matches
    :param idxs: their (sent, word) idx
    :param df: token DataFrame with a (sent, token) MultiIndex
    :param metadata: dict mapping sent id -> metadata dict
    :param add_meta: True for all metadata fields, or a list of field names
    :param category: subcorpus/category label for the line
    :param fname: source filename for the line
    """
    conc_res = []
    # potential speedup: turn idxs into dict
    from collections import defaultdict
    mdict = defaultdict(list)
    # if remaking idxs here, don't need to do it earlier
    idxs = list(matches.index)
    # group (token id, formatted match) pairs by sentence id
    for mid, (s, i) in zip(matches, idxs):
        #for s, i in matches:
        mdict[s].append((i, mid))
    # shorten df to just relevant sents to save lookup time
    df = df.loc[list(mdict.keys())]
    # don't look up the same sentence multiple times
    for s, tup in sorted(mdict.items()):
        sent = df.loc[s]
        if not preserve_case:
            sent = sent.str.lower()
        meta = metadata[s]
        sname = meta.get('speaker', 'none')
        for i, mid in tup:
            if not preserve_case:
                mid = mid.lower()
            ix = '%d,%d' % (s, i)
            # left/right context: tokens before and after the match
            start = ' '.join(sent.loc[:i-1].values)
            end = ' '.join(sent.loc[i+1:].values)
            lin = [ix, category, fname, sname, start, mid, end]
            if add_meta:
                # append requested metadata fields, skipping internals
                for k, v in sorted(meta.items()):
                    if k in ['speaker', 'parse', 'sent_id']:
                        continue
                    if isinstance(add_meta, list):
                        if k in add_meta:
                            lin.append(v)
                    elif add_meta is True:
                        lin.append(v)
            conc_res.append(lin)
    return conc_res
def p_series_to_x_series(val):
    """
    Map a single POS tag to its word class via the taglemma table
    (falling back to the lowercased tag itself).
    """
    # bug fix: taglemma was never imported at module level, so this
    # function raised NameError whenever it was called
    from corpkit.dictionaries.word_transforms import taglemma
    return taglemma.get(val.lower(), val.lower())
def fast_simple_conc(dfss, idxs, show,
                     metadata=False,
                     add_meta=False,
                     fname=False,
                     category=False,
                     only_format_match=True,
                     conc=False,
                     preserve_case=False,
                     gramsize=1,
                     window=None):
    """
    Fast, simple concordancer, heavily conditional
    to save time.

    :param dfss: token DataFrame with a (sent, token) MultiIndex
    :param idxs: index tuples of the matched tokens
    :param show: list of show codes ('mw', 'gf', '+1mp', ...); NOTE:
                 mutated in place when wordclass ('x') codes are present
    :param conc: whether to also build concordance lines
    :returns: (list of formatted matches, list of concordance lines)
    """
    if dfss.empty:
        return [], []
    import pandas as pd
    # best case, the user doesn't want any gov-dep stuff
    simple = all(i.startswith('m') and not i.endswith('a') for i in show)
    # worst case, the user wants something from dep
    dmode = any(x.startswith('d') for x in show)
    # make a quick copy if need be because we modify the df
    df = dfss.copy() if not simple else dfss
    # add text to df columns so that it resembles 'show' values
    lst = ['s', 'i', 'w', 'l', 'e', 'p', 'f']
    # for ner, change O to 'none'
    if 'e' in df.columns:
        df['e'] = df['e'].str.replace('^O$', 'none')
    # rename single-letter columns to their 'm'-prefixed show names
    df.columns = ['m' + i if len(i) == 1 and i in lst \
                  else i for i in list(df.columns)]
    # this is the data needed for concordancing
    df_for_lr = df['mw'] if only_format_match else df
    just_matches = df.loc[idxs]
    # if the showing can't come straight out of the df,
    # we can add columns with the necessary information
    if not simple:
        formatted = []
        import numpy as np
        for ind, i in enumerate(show):
            # nothing to do if it's an m feature
            if i.startswith('m') and not i.endswith('a'):
                continue
            # defaults for adjacent work
            adj, tomove, adjname = False, False, ''
            adj, i = determine_adjacent(i)
            adjname = ''.join(adj) if hasattr(adj, '__iter__') else ''
            # get number of places to shift left or right
            if adj:
                if adj[0] == '+':
                    tomove = -int(adj[1])
                elif adj[0] == '-':
                    tomove = int(adj[1])
            # cut df down to just needed bits for the sake of speed
            # i.e. if we want gov func, get only gov and func cols
            ob, att = i[0], i[-1]
            # 'x' (wordclass) is computed from POS ('p') and translated later
            xmode = att == 'x'
            if xmode:
                att = 'p'
                show[ind] = show[ind][:-1] + 'p'
            # for corefs, we also need the coref data
            if ob in ['h', 'r']:
                dfx = df[['c', 'm' + att]]
            else:
                lst = ['s', 'i', 'w', 'l', 'f', 'p']
                if att in lst and ob != 'm':
                    att = 'm' + att
                if ob == 'm' and att != 'a':
                    dfx = df[['m' + att]]
                elif att == 'a':
                    dfx = df[['mf', 'g']]
                else:
                    dfx = df[[ob, att]]
            # decide if we need to format everything
            if (not conc or only_format_match) and not adj:
                to_proc = just_matches
            else:
                to_proc = df
            # now we get or generate the new column
            if ob == 'm' and att != 'a':
                ser = to_proc['m' + att]
            else:
                # row-wise lookup of gov/dep/coref data; see make_series
                ser = to_proc.apply(make_series, df=dfx, obj=ob, att=att, axis=1)
            if xmode:
                ser = ser.apply(p_series_to_x_series)
            # adjmode simply shifts series and index
            if adj:
                #todo: this shifts next sent into previous sent!
                ser = ser.shift(tomove)
                ser = ser.fillna('none')
            # dependent mode produces multiple matches
            # so, we have to make a new dataframe with duplicate indexes
            # todo: what about when there are two dep options?
            ser.name = adjname + i
            if ob != 'd':
                df[ser.name] = ser
            else:
                df = make_new_for_dep(df, ser, i)
                df = df.fillna('none')
    # x is wordclass. so, we just get pos and translate it
    nshow = [(i.replace('x', 'p'), i.endswith('x')) for i in show]
    # generate a series of matches with slash sep if multiple show vals
    if len(nshow) > 1:
        if conc and not only_format_match:
            first = turn_pos_to_wc(df[nshow[0][0]], nshow[0][1])
            llist = [turn_pos_to_wc(df[sho], xmode) for sho, xmode in nshow[1:]]
            df = first.str.cat(others=llist, sep='/')
            matches = df[idxs]
        else:
            justm = df.loc[idxs]
            first = turn_pos_to_wc(justm[nshow[0][0]], nshow[0][1])
            llist = [turn_pos_to_wc(justm[sho], xmode) for sho, xmode in nshow[1:]]
            matches = first.str.cat(others=llist, sep='/')
            if conc:
                df = df_for_lr
    else:
        if conc and not only_format_match:
            df = turn_pos_to_wc(df[nshow[0][0]], nshow[0][1])
            matches = df[idxs]
        else:
            matches = turn_pos_to_wc(df[nshow[0][0]][idxs], nshow[0][1])
            if conc:
                df = df_for_lr
    # get rid of (e.g.) nan caused by no_punct=True
    matches = matches.dropna(axis=0, how='all')
    if not preserve_case:
        matches = matches.str.lower()
    if not conc:
        # todo: is matches.values faster?
        return list(matches), []
    else:
        conc_res = concline_generator(matches, idxs, df,
                                      metadata, add_meta,
                                      category, fname,
                                      preserve_case=preserve_case)
        return list(matches), conc_res
def make_collocate_show(show, current):
    """
    Turn show values into collocate show values: keep each original code
    and add a copy prefixed with the signed offset *current* (a positive
    offset gains an explicit '+').
    """
    offset = str(current)
    prefixed = []
    for code in show:
        shifted = offset + code
        if not shifted.startswith('-'):
            shifted = '+' + shifted
        prefixed.append(shifted)
    return list(show) + prefixed
def show_this(df, matches, show, metadata, conc=False,
              coref=False, category=False, show_conc_metadata=False, **kwargs):
    """
    Dispatch formatting of matched tokens according to *show* values.

    :param df: token DataFrame with a (sent, token) MultiIndex
    :param matches: iterable of (sent, token) index tuples
    :param show: list of show codes, e.g. ['mw'] or ['gf', 'mw']
    :param metadata: dict mapping sent id -> metadata dict
    :param conc: whether to also build concordance lines
    :returns: (results, concordance) pair
    """
    only_format_match = kwargs.pop('only_format_match', True)
    ngram_mode = kwargs.get('ngram_mode', True)
    preserve_case = kwargs.get('preserve_case', False)
    gramsize = kwargs.get('gramsize', 1)
    window = kwargs.get('window', None)
    matches = sorted(list(matches))
    # add index as column if need be
    # bug fix: MultiIndex.labels was renamed to .codes (pandas 0.24) and
    # removed (pandas 1.0); use whichever attribute this pandas provides
    if any(i.endswith('s') for i in show):
        codes = df.index.codes if hasattr(df.index, 'codes') else df.index.labels
        df['ms'] = [str(i) for i in codes[0]]
    if any(i.endswith('i') for i in show):
        codes = df.index.codes if hasattr(df.index, 'codes') else df.index.labels
        df['mi'] = [str(i) for i in codes[1]]
    # attempt to leave really fast
    if kwargs.get('countmode'):
        return len(matches), {}
    if len(show) == 1 and not conc and gramsize == 1 and not window:
        if show[0] in ['ms', 'mi', 'mw', 'ml', 'mp', 'mf']:
            get_fast = df.loc[matches][show[0][-1]]
            if not preserve_case:
                get_fast = get_fast.str.lower()
            return list(get_fast), {}
    # todo: make work for ngram, collocate and coref
    if all(i[0] in ['m', 'g', '+', '-', 'd', 'h', 'r'] for i in show):
        if gramsize == 1 and not window:
            return fast_simple_conc(df,
                                    matches,
                                    show,
                                    metadata,
                                    show_conc_metadata,
                                    kwargs.get('filename', ''),
                                    category,
                                    only_format_match,
                                    conc=conc,
                                    preserve_case=preserve_case,
                                    gramsize=gramsize,
                                    window=window)
        else:
            # n-gram mode iterates gram positions; collocate mode iterates
            # a window of offsets either side of the match
            resbit = []
            concbit = []
            iterab = range(1, gramsize + 1) if gramsize > 1 else range(-window, window+1)
            for i in iterab:
                if i == 0:
                    continue
                if window:
                    nnshow = make_collocate_show(show, i)
                else:
                    nnshow = show
                r, c = fast_simple_conc(df,
                                        matches,
                                        nnshow,
                                        metadata,
                                        show_conc_metadata,
                                        kwargs.get('filename', ''),
                                        category,
                                        only_format_match,
                                        conc=conc,
                                        preserve_case=preserve_case,
                                        gramsize=gramsize,
                                        window=window)
                resbit.append(r)
                concbit.append(c)
                if not window:
                    # shift so the next pass pairs matches with the
                    # following token
                    df = df.shift(1)
                    df = df.fillna('none')
            resbit = list(zip(*resbit))
            concbit = list(zip(*concbit))
            out = []
            conc_out = []
            # this is slow but keeps the order
            # remove it esp for resbit where it doesn't matter
            for r in resbit:
                for b in r:
                    out.append(b)
            for c in concbit:
                for b in c:
                    conc_out.append(b)
            return out, conc_out
    # NOTE(review): show values outside the handled object codes fall
    # through and return None -- presumably unreachable; confirm
def remove_by_mode(matches, mode, criteria):
    """
    Combine results from multiple criteria.

    'any' keeps every distinct match; 'all' keeps only matches that
    occurred at least once per criterion (i.e. satisfied all of them).
    """
    if mode == 'any':
        return set(matches)
    if mode == 'all':
        from collections import Counter
        needed = len(criteria)
        tally = Counter(matches)
        return {match for match, freq in tally.items() if freq >= needed}
def determine_adjacent(original):
    """
    Figure out if we're doing an adjacent location, get the co-ordinates
    and return them and the stripped original.

    '+2gf' -> (('+', '2'), 'gf'); 'mw' -> (False, 'mw')
    """
    if original[0] not in ('+', '-'):
        return False, original
    sign = original[0]
    distance = original[1:-2]
    return (sign, distance), original[-2:]
def cut_df_by_metadata(df, metadata, criteria, coref=False,
                       feature='speaker', method='just'):
    """
    Keep or remove parts of the DataFrame based on metadata criteria

    :param df: token DataFrame with a (sent, token) MultiIndex
    :param metadata: dict mapping sent id -> metadata dict
    :param criteria: list/set/tuple of values, a plain string, or a
                     compiled regex
    :param coref: if True, skip filtering (coref needs whole sents)
    :param feature: metadata field to filter on
    :param method: 'just' keeps matching sentences, 'skip' removes them
    """
    if not criteria:
        df._metadata = metadata
        return df
    # maybe could be sped up, but let's not for now:
    if coref:
        df._metadata = metadata
        return df
    import re
    good_sents = []
    new_metadata = {}
    from corpkit.constants import STRINGTYPE
    # bug fix: re._pattern_type was removed in Python 3.7; prefer
    # re.Pattern and fall back for older interpreters
    pattern_type = getattr(re, 'Pattern', None) or re._pattern_type
    # lowercase list-style criteria once, not once per sentence
    if isinstance(criteria, (list, set, tuple)):
        criteria = [i.lower() for i in criteria]
    # could make the below more elegant ...
    for sentid, data in sorted(metadata.items()):
        meta_value = data.get(feature, 'none')
        # a field may hold several ';'-separated values
        lst_met_vl = meta_value.split(';')
        if isinstance(criteria, (list, set, tuple)):
            if method == 'just':
                if any(i.lower() in criteria for i in lst_met_vl):
                    good_sents.append(sentid)
                    new_metadata[sentid] = data
            elif method == 'skip':
                # NOTE(review): unlike 'just', this compares without
                # lowercasing the candidate values -- confirm intent
                if not any(i in criteria for i in lst_met_vl):
                    good_sents.append(sentid)
                    new_metadata[sentid] = data
        elif isinstance(criteria, (pattern_type, STRINGTYPE)):
            if method == 'just':
                if any(re.search(criteria, i, re.IGNORECASE) for i in lst_met_vl):
                    good_sents.append(sentid)
                    new_metadata[sentid] = data
            elif method == 'skip':
                if not any(re.search(criteria, i, re.IGNORECASE) for i in lst_met_vl):
                    good_sents.append(sentid)
                    new_metadata[sentid] = data
    df = df.loc[good_sents]
    df = df.fillna('')
    df._metadata = new_metadata
    return df
def cut_df_by_meta(df, just_metadata, skip_metadata):
    """
    Apply 'just' (keep) and then 'skip' (remove) metadata filters to a
    DataFrame; each filter dict maps a metadata feature to its criteria.
    A None frame passes straight through.
    """
    if df is None:
        return df
    if just_metadata:
        for feature, crit in just_metadata.items():
            df = cut_df_by_metadata(df, df._metadata, crit, feature=feature)
    if skip_metadata:
        for feature, crit in skip_metadata.items():
            df = cut_df_by_metadata(df, df._metadata, crit,
                                    feature=feature, method='skip')
    return df
def tgrep_searcher(f=False,
                   metadata=False,
                   from_df=False,
                   search=False,
                   searchmode=False,
                   exclude=False,
                   excludemode=False,
                   translated_option=False,
                   subcorpora=False,
                   conc=False,
                   root=False,
                   preserve_case=False,
                   countmode=False,
                   show=False,
                   lem_instance=False,
                   lemtag=False,
                   category=False,
                   fname=False,
                   show_conc_metadata=False,
                   only_format_match=True,
                   **kwargs):
    """
    Use tgrep for constituency grammar search

    Runs the tgrep query over each sentence's 'parse' metadata string and
    formats every hit via show_tree_as_per_option, optionally building
    concordance lines.

    :param metadata: dict mapping sent id -> metadata dict (must contain
                     a 'parse' tree string per sentence)
    :param from_df: the token DataFrame passed through to tree formatting
    :param search: tgrep query string, or a dict holding it under 't'
    :returns: (matches, concordance lines) pair
    """
    from corpkit.process import show_tree_as_per_option, tgrep
    matches = []
    conc_out = []
    # in case search was a dict
    srch = search.get('t') if isinstance(search, dict) else search
    metcat = category if category else ''
    for i, sent in metadata.items():
        results = tgrep(sent['parse'], srch)
        sname = sent.get('speaker')
        # NOTE(review): this overwrites the ''-normalised metcat above with
        # the raw category (possibly False) -- confirm which is intended
        metcat = category
        for res in results:
            tok_id, start, middle, end = show_tree_as_per_option(show, res, sent,
                                         df=from_df, sent_id=i, conc=conc,
                                         only_format_match=only_format_match)
            #middle, idx = show_tree_as_per_option(show, res, 'conll', sent, df=df, sent_id=i)
            matches.append(middle)
            if conc:
                # concordance line: index, category, file, speaker, l/m/r
                form_ix = '%d,%d' % (i, tok_id)
                lin = [form_ix, metcat, fname, sname, start, middle, end]
                if show_conc_metadata:
                    for k, v in sorted(sent.items()):
                        if k in ['speaker', 'parse', 'sent_id']:
                            continue
                        if isinstance(show_conc_metadata, list):
                            if k in show_conc_metadata:
                                lin.append(v)
                        elif show_conc_metadata is True:
                            lin.append(v)
                conc_out.append(lin)
    return matches, conc_out
def slow_tregex(metadata=False,
                search=False,
                searchmode=False,
                exclude=False,
                excludemode=False,
                translated_option=False,
                subcorpora=False,
                conc=False,
                root=False,
                preserve_case=False,
                countmode=False,
                show=False,
                lem_instance=False,
                lemtag=False,
                from_df=False,
                fname=False,
                category=False,
                only_format_match=False,
                **kwargs):
    """
    Do the metadata specific version of tregex queries

    Joins every sentence's parse tree into one in-memory corpus string,
    runs the tregex query over it, formats the results, and optionally
    builds concordance lines from whole-sentence (-w) matches.

    :returns: ``(results, conc_lines)``, or ``(count, False)`` when
              ``countmode`` is on; empty containers when nothing to search
    """
    from corpkit.process import tregex_engine, format_tregex, make_conc_lines_from_whole_mid
    # search may arrive as {'t': query}; extract the query itself
    if isinstance(search, dict):
        search = list(search.values())[0]
    # pair each sentence's subcorpus value with its parse tree
    speak_tree = [(x.get(subcorpora, 'none'), x['parse']) for x in metadata.values()]
    if speak_tree:
        speak, tree = list(zip(*speak_tree))
    else:
        speak, tree = [], []
    # no speaker info at all: pass False rather than a tuple of empties
    if all(not x for x in speak):
        speak = False
    to_open = '\n'.join(tree)
    concs = []
    # nothing to search: bail out early (only when subcorpora requested)
    if not to_open.strip('\n'):
        if subcorpora:
            return {}, {}
    ops = ['-%s' % i for i in translated_option] + ['-o', '-n']
    res = tregex_engine(query=search,
                        options=ops,
                        corpus=to_open,
                        root=root,
                        preserve_case=preserve_case,
                        speaker_data=False)
    res = format_tregex(res, show, exclude=exclude, excludemode=excludemode,
                        translated_option=translated_option,
                        lem_instance=lem_instance, countmode=countmode, speaker_data=False,
                        lemtag=lemtag)
    if not res:
        if subcorpora:
            return [], []
    if conc:
        # rerun with -w to get whole sentences for concordancing
        ops += ['-w']
        whole_res = tregex_engine(query=search,
                                  options=ops,
                                  corpus=to_open,
                                  root=root,
                                  preserve_case=preserve_case,
                                  speaker_data=speak)
        # format match too depending on option
        if not only_format_match:
            whole_res = format_tregex(whole_res, show, exclude=exclude, excludemode=excludemode,
                                      translated_option=translated_option,
                                      lem_instance=lem_instance, countmode=countmode,
                                      speaker_data=speak, whole=True,
                                      lemtag=lemtag)
        # make conc lines from conc results
        concs = make_conc_lines_from_whole_mid(whole_res, res, filename=fname, show=show)
    else:
        concs = [False for i in res]
    # formatted results may be (speaker, match) tuples; keep just the match
    if len(res) > 0 and isinstance(res[0], tuple):
        res = [i[-1] for i in res]
    if countmode:
        if isinstance(res, int):
            return res, False
        else:
            return len(res), False
    else:
        return res, concs
def get_stats(from_df=False, metadata=False, feature=False, root=False, **kwargs):
    """
    Get general statistics for a DataFrame

    Counts tokens, words, punctuation and open/closed-class items from the
    CONLL DataFrame, then runs a fixed set of tregex queries over the parse
    trees to count clause/mood types and (mental/relational/verbal) processes.

    :returns: ``(Counter of statistics, {})``, or ``({}, {})`` when there
              are no trees to search
    """
    import re
    from corpkit.dictionaries.process_types import processes
    from collections import Counter, defaultdict
    from corpkit.process import tregex_engine
    def ispunct(s):
        # True when every character in s is punctuation
        import string
        return all(c in string.punctuation for c in s)
    tree = [x['parse'] for x in metadata.values()]
    tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S '\
                 '!$-- VP !$ VP)) !<< (/\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',
                 'Open interrogative': r'ROOT < SBARQ <<- (/\?/ !< __)',
                 'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\?/ !< __))',
                 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',
                 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',
                 'Clauses': r'/^S/ < __',
                 'Interrogative': r'ROOT << (/\?/ !< __)',
                 'Processes': r'/VB.?/ >># (VP !< VP >+(VP) /^(S|ROOT)/)'}
    result = Counter()
    # initialise every query's count so absent matches still report zero
    for name in tregex_qs.keys():
        result[name] = 0
    # NOTE(review): MultiIndex.labels was removed in newer pandas (use
    # .codes there) -- verify against the pandas version this project pins
    result['Sentences'] = len(set(from_df.index.labels[0]))
    result['Passives'] = len(from_df[from_df['f'] == 'nsubjpass'])
    result['Tokens'] = len(from_df)
    # the below has returned a float before. i assume actually a nan?
    result['Words'] = len([w for w in list(from_df['w']) if w and not ispunct(str(w))])
    result['Characters'] = sum([len(str(w)) for w in list(from_df['w']) if w])
    result['Open class'] = sum([1 for x in list(from_df['p']) if x and x[0] in ['N', 'J', 'V', 'R']])
    result['Punctuation'] = result['Tokens'] - result['Words']
    result['Closed class'] = result['Words'] - result['Open class']
    to_open = '\n'.join(tree)
    if not to_open.strip('\n'):
        return {}, {}
    for name, q in sorted(tregex_qs.items()):
        # 'Processes' needs terminal output (-t); others only need match counts
        options = ['-o', '-t'] if name == 'Processes' else ['-o']
        # c option removed, could cause memory problems
        #ops = ['-%s' % i for i in translated_option] + ['-o', '-n']
        res = tregex_engine(query=q,
                            options=options,
                            corpus=to_open,
                            root=root)
        #res = format_tregex(res)
        if not res:
            continue
        concs = [False for i in res]
        # record the number of hits for this query (assigned per hit, but
        # the value is constant: len(res))
        for (_, met, r), line in zip(res, concs):
            result[name] = len(res)
        if name != 'Processes':
            continue
        # classify the matched process words into semantic process types
        non_mat = 0
        for ptype in ['mental', 'relational', 'verbal']:
            reg = getattr(processes, ptype).words.as_regex(boundaries='l')
            count = len([i for i in res if re.search(reg, i[-1])])
            nname = ptype.title() + ' processes'
            result[nname] = count
    if root:
        root.update()
    return result, {}
def get_corefs(df, matches):
    """
    Add corefs to a set of matches

    Every match is kept; when a match's coref annotation marks a chain
    head (trailing asterisk), every token sharing that annotation is
    pulled in as well.
    """
    corefs = df['c']
    expanded = set()
    for sent, tok in matches:
        # always keep the original match
        expanded.add((sent, tok))
        chain = corefs[(sent, tok)]
        # chain heads carry a trailing '*': expand to the whole chain
        if chain.endswith('*'):
            expanded.update(corefs[corefs == chain].index)
    return expanded
def pipeline(f=False,
             search=False,
             show=False,
             exclude=False,
             searchmode='all',
             excludemode='any',
             conc=False,
             coref=False,
             from_df=False,
             just_metadata=False,
             skip_metadata=False,
             category=False,
             show_conc_metadata=False,
             statsmode=False,
             search_trees=False,
             lem_instance=False,
             **kwargs):
    """
    A basic pipeline for conll querying---some options still to do

    Loads (or receives) a CONLL DataFrame, filters it by metadata,
    dispatches to the stats/tregex/tgrep searchers where requested, and
    otherwise performs the dependency search/exclude/show steps itself.

    :returns: ``(results, conc_lines)`` -- dicts when searching by a
              metadata feature, lists otherwise
    """
    if isinstance(show, str):
        show = [show]
    all_matches = []
    all_exclude = []
    if from_df is False or from_df is None:
        df = parse_conll(f, usecols=kwargs.get('usecols'))
        # can fail here if df is none
        if df is None:
            print('Problem reading data from %s.' % f)
            return [], []
        metadata = df._metadata
    else:
        df = from_df
        # NOTE(review): raises KeyError when from_df is passed without
        # 'metadata' in kwargs -- confirm all callers supply it
        metadata = kwargs.pop('metadata')
    feature = kwargs.pop('by_metadata', False)
    # apply 'just'/'skip' metadata filters up front
    df = cut_df_by_meta(df, just_metadata, skip_metadata)
    # choose the searcher: recursive pipeline, stats, or tree searches
    searcher = pipeline
    if statsmode:
        searcher = get_stats
    if search_trees == 'tregex':
        searcher = slow_tregex
    elif search_trees == 'tgrep':
        searcher = tgrep_searcher
    if feature:
        if df is None:
            print('Problem reading data from %s.' % f)
            return {}, {}
        # determine searcher
        resultdict = {}
        concresultdict = {}
        # get all the possible values in the df for the feature of interest
        all_cats = set([i.get(feature, 'none') for i in list(df._metadata.values())])
        for category in all_cats:
            # restrict data to this category, then run the searcher on it
            new_df = cut_df_by_metadata(df, df._metadata, category, feature=feature, method='just')
            r, c = searcher(f=False,
                            fname=f,
                            search=search,
                            exclude=exclude,
                            show=show,
                            searchmode=searchmode,
                            excludemode=excludemode,
                            conc=conc,
                            coref=coref,
                            from_df=new_df,
                            by_metadata=False,
                            category=category,
                            show_conc_metadata=show_conc_metadata,
                            lem_instance=lem_instance,
                            root=kwargs.pop('root', False),
                            subcorpora=feature,
                            metadata=new_df._metadata,
                            **kwargs)
            resultdict[category] = r
            concresultdict[category] = c
        return resultdict, concresultdict
    if df is None:
        print('Problem reading data from %s.' % f)
        return [], []
    kwargs['ngram_mode'] = any(x.startswith('n') for x in show)
    #df = cut_df_by_metadata(df, df._metadata, kwargs.get('just_speakers'), coref=coref)
    metadata = df._metadata
    # sanity check: the word column must hold strings
    try:
        df['w'].str
    except AttributeError:
        raise AttributeError("CONLL data doesn't match expectations. " \
                             "Try the corpus.conll_conform() method to " \
                             "convert the corpus to the latest format.")
    if kwargs.get('no_punct', True):
        # keep only rows whose word contains at least one word character
        df = df[df['w'].fillna('').str.contains(kwargs.get('is_a_word', r'[A-Za-z0-9]'))]
        # remove brackets --- could it be done in one regex?
        df = df[~df['w'].str.contains(r'^-.*B-$')]
    if kwargs.get('no_closed'):
        from corpkit.dictionaries import wordlists
        crit = wordlists.closedclass.as_regex(boundaries='l', case_sensitive=False)
        df = df[~df['w'].str.contains(crit)]
    if statsmode:
        # positional args map to (from_df, metadata, feature)
        return get_stats(df, metadata, False, root=kwargs.pop('root', False), **kwargs)
    elif search_trees:
        return searcher(from_df=df,
                        search=search,
                        searchmode=searchmode,
                        exclude=exclude,
                        excludemode=excludemode,
                        conc=conc,
                        by_metadata=False,
                        metadata=metadata,
                        root=kwargs.pop('root', False),
                        fname=f,
                        show=show,
                        **kwargs)
    # do no searching if 'any' is requested
    if len(search) == 1 and list(search.keys())[0] == 'w' \
        and hasattr(list(search.values())[0], 'pattern') \
        and list(search.values())[0].pattern == r'.*':
        all_matches = list(df.index)
    else:
        for k, v in search.items():
            adj, k = determine_adjacent(k)
            res = search_this(df, k[0], k[-1], v, adjacent=adj, coref=coref)
            for r in res:
                all_matches.append(r)
        # combine results according to the 'all'/'any' search mode
        all_matches = remove_by_mode(all_matches, searchmode, search)
        if exclude:
            for k, v in exclude.items():
                adj, k = determine_adjacent(k)
                res = search_this(df, k[0], k[-1], v, adjacent=adj, coref=coref)
                for r in res:
                    all_exclude.append(r)
            all_exclude = remove_by_mode(all_exclude, excludemode, exclude)
            # subtract the excluded matches
            all_matches = all_matches.difference(all_exclude)
    if coref:
        # expand matches to cover whole coreference chains
        all_matches = get_corefs(df, all_matches)
    out, conc_out = show_this(df, all_matches, show, metadata, conc,
                              coref=coref, category=category,
                              show_conc_metadata=show_conc_metadata,
                              **kwargs)
    return out, conc_out
def load_raw_data(f):
    """
    Loads the stripped and raw versions of a parsed file

    :returns: ``(stripped_text, text_with_speaker_ids)`` tuple
    """
    from corpkit.process import saferead
    base = f.replace('.conll', '')
    # the unparsed version of the file, without speaker IDs
    stripped_txtdata, _ = saferead(base.replace('-parsed', '-stripped'))
    # the unparsed version with speaker ids
    id_txtdata, _ = saferead(base.replace('-parsed', ''))
    return stripped_txtdata, id_txtdata
def get_speaker_from_offsets(stripped, plain, sent_offsets,
                             metadata_mode=False,
                             speaker_segmentation=False):
    """
    Take offsets and get a speaker ID or metadata from them

    :param stripped: corpus text with speaker IDs/metadata removed
    :param plain: the same text, but retaining speaker IDs and metadata tags
    :param sent_offsets: (start, end) character offsets into ``stripped``
    :param metadata_mode: parse a trailing ``<metadata k=v ...>`` tag
    :param speaker_segmentation: parse a leading ``SPEAKER: `` prefix
    :returns: dict of metadata, containing at least a 'speaker' key
              (empty dict when there is no text at all)
    """
    if not stripped and not plain:
        return {}
    start, end = sent_offsets
    sent = stripped[start:end]
    # the sentence's line number is the number of newlines before it
    cut_old_text = stripped[:start]
    line_index = cut_old_text.count('\n')
    # the corresponding line in the annotated version of the text
    with_id = plain.splitlines()[line_index]
    # parse xml tags in original file ...
    meta_dict = {'speaker': 'none'}
    if metadata_mode:
        # everything after the last '<metadata ' holds the key=value pairs
        metad = with_id.strip().rstrip('>').rsplit('<metadata ', 1)
        import shlex
        from corpkit.constants import PYTHON_VERSION
        try:
            shxed = shlex.split(metad[-1].encode('utf-8')) if PYTHON_VERSION == 2 \
                else shlex.split(metad[-1])
        except ValueError:
            # shlex raises ValueError on unbalanced quotes;
            # fall back to a cruder split
            shxed = metad[-1].split("' ")
        for m in shxed:
            if PYTHON_VERSION == 2:
                m = m.decode('utf-8')
            # in rare cases of weirdly formatted xml:
            try:
                k, v = m.split('=', 1)
                # normalise curly quotes and strip wrapping quote marks
                v = v.replace(u"\u2018", "'").replace(u"\u2019", "'").strip("'").strip('"')
                meta_dict[k] = v
            except ValueError:
                continue
    if speaker_segmentation:
        split_line = with_id.split(': ', 1)
        # handle multiple tags?
        if len(split_line) > 1:
            speakerid = split_line[0]
        else:
            speakerid = 'UNIDENTIFIED'
        meta_dict['speaker'] = speakerid
    return meta_dict
def convert_json_to_conll(path,
                          speaker_segmentation=False,
                          coref=False,
                          metadata=False,
                          just_files=False):
    """
    take json corenlp output and convert to conll, with
    dependents, speaker ids and so on added.

    :param path: the parsed corpus, or a list of files within a parsed corpus
    :param speaker_segmentation: recover 'SPEAKER: ' prefixes from the raw text
    :param coref: append coreference chain IDs as an extra column
    :param metadata: recover <metadata> tag values from the raw text
    :param just_files: explicit list of files to convert, overriding path

    Might need to fix if outname used?
    """
    import json
    import re
    from corpkit.build import get_filepaths
    from corpkit.constants import CORENLP_VERSION, OPENER, PYTHON_VERSION
    # todo: stabilise this
    #if CORENLP_VERSION == '3.7.0':
    #    coldeps = 'enhancedPlusPlusDependencies'
    #else:
    #    coldeps = 'collapsed-ccprocessed-dependencies'
    print('Converting files to CONLL-U...')
    if just_files:
        files = just_files
    elif isinstance(path, list):
        files = path
    else:
        files = get_filepaths(path, ext='conll')
    for f in files:
        # the raw text is only needed to recover speakers/metadata
        if speaker_segmentation or metadata:
            stripped, raw = load_raw_data(f)
        else:
            stripped, raw = None, None
        main_out = ''
        # if the file has already been converted (or the json is corrupt),
        # loading fails and the file is skipped
        with OPENER(f, 'r') as fo:
            try:
                data = json.load(fo)
            except ValueError:
                # todo: differentiate between json errors -- a corrupt
                # corenlp output file fails silently here too
                continue
        for idx, sent in enumerate(data['sentences'], start=1):
            # flatten the bracketted parse onto a single line
            tree = sent['parse'].replace('\n', '')
            tree = re.sub(r'\s+', ' ', tree)
            # offsets for speaker_id
            sent_offsets = (sent['tokens'][0]['characterOffsetBegin'],
                            sent['tokens'][-1]['characterOffsetEnd'])
            metad = get_speaker_from_offsets(stripped,
                                             raw,
                                             sent_offsets,
                                             metadata_mode=True,
                                             speaker_segmentation=speaker_segmentation)
            # currently there is no standard for sent_id, so it is left out; if
            # https://github.com/UniversalDependencies/docs/issues/273 is updated
            # it could be switched back on
            #output = '# sent_id %d\n# parse=%s\n' % (idx, tree)
            output = '# parse=%s\n' % tree
            for k, v in sorted(metad.items()):
                output += '# %s=%s\n' % (k, v)
            # dependency annotations: the key name differs across corenlp versions
            deps = sent.get('enhancedPlusPlusDependencies',
                            sent.get('collapsed-ccprocessed-dependencies'))
            for token in sent['tokens']:
                index = str(token['index'])
                # this got a stopiteration on rsc data, hence the default
                governor, func = next(((i['governor'], i['dep'])
                                       for i in deps
                                       if i['dependent'] == int(index)), ('_', '_'))
                # equality, not identity ('is'), for the no-governor marker
                if governor == '_':
                    depends = False
                else:
                    depends = [str(i['dependent']) for i in deps
                               if i['governor'] == int(index)]
                if not depends:
                    depends = '0'
                #offsets = '%d,%d' % (token['characterOffsetBegin'], token['characterOffsetEnd'])
                line = [index,
                        token['word'],
                        token['lemma'],
                        token['pos'],
                        token.get('ner', '_'),
                        '_',  # morphology: unannotated, kept for CONLL-U conformity
                        governor,
                        func,
                        ','.join(depends)]
                # no ints
                line = [str(l) if isinstance(l, int) else l for l in line]
                if PYTHON_VERSION == 2:
                    try:
                        [unicode(l, errors='ignore') for l in line]
                    except TypeError:
                        pass
                output += '\t'.join(line) + '\n'
            main_out += output + '\n'
        # post process corefs
        if coref:
            dct = {}
            idxreg = re.compile('^([0-9]+)\t([0-9]+)')
            splitmain = main_out.split('\n')
            # add tab _ to each line, make dict of sent-token: line index
            # NOTE(review): token lines here start 'index\tword', so the regex
            # only matches lines whose second field is numeric -- verify the
            # intended line format for this coref lookup
            for i, line in enumerate(splitmain):
                if line and not line.startswith('#'):
                    splitmain[i] += '\t_'
                match = re.search(idxreg, line)
                if match:
                    l, t = match.group(1), match.group(2)
                    dct[(int(l), int(t))] = i
            # for each coref chain, if there are corefs
            for numstring, list_of_dicts in sorted(data.get('corefs', {}).items()):
                # for each mention
                for d in list_of_dicts:
                    snum = d['sentNum']
                    # get head?
                    # this has been fixed in dev corenlp: 'headIndex' --- could simply use that
                    # ref : https://github.com/stanfordnlp/CoreNLP/issues/231
                    for i in range(d['startIndex'], d['endIndex']):
                        try:
                            ix = dct[(snum, i)]
                            fixed_line = splitmain[ix].rstrip('\t_') + '\t%s' % numstring
                            gv = fixed_line.split('\t')[6]
                            try:
                                gov_s = int(gv)
                            except ValueError:
                                continue
                            # a token governed from outside the mention span is
                            # its head; mark it with an asterisk
                            if gov_s < d['startIndex'] or gov_s > d['endIndex']:
                                fixed_line += '*'
                            splitmain[ix] = fixed_line
                            dct.pop((snum, i))
                        except KeyError:
                            pass
            main_out = '\n'.join(splitmain)
        # normalise curly quotes and write the converted file back in place
        with OPENER(f, 'w', encoding='utf-8') as fo:
            main_out = main_out.replace(u"\u2018", "'").replace(u"\u2019", "'")
            fo.write(main_out)
|
|
# Pybot discord-unstable build
# Misha Larionov and Nicholas Carr
# github.com/MishaLarionov/pybot/tree/discord-unstable
# Licensed under MIT License
# See license.txt for full license
# TODO:
# Move responses into a separate .py file
print("Loading... (This may take a while)")
#Import all the stuff
import cfg, time, platform, ast, sys, os, re
#from html.parser import HTMLParser
#Second line of import statements. These may need to be installed
import asyncio, discord, requests, dateparser, wikipedia, github3
wikipedia.set_lang("en")
# GitHub watcher setup: only enabled when the config defines both a
# notification channel and a repo list.
try:
    cfg.GITHUBCHANNEL
    cfg.REPOS
except AttributeError:
    # config has no GitHub settings, so the watcher stays disabled
    # (was a bare 'except:', which would also have hidden real errors)
    pass
else:
    g = github3.GitHub()
    repos = []
    # lastCommits maps repo id -> {branch name: last seen commit}
    lastCommits = {}
    for entry in cfg.REPOS:
        repo = github3.repository(entry[0], entry[1])
        repos.append(repo)
        # remember every branch tip so new commits can be detected later
        lastCommits[repo.id] = {}
        for b in repo.iter_branches():
            lastCommits[repo.id][b.name] = b.commit
#Client intialization stuff
client = discord.Client()
#Initialize help string
helpString = """PYBOT V5 HELP
http://github.com/MishaLarionov/pybot/tree/discord\n
Commands are called by preceeding them with `-p`, or `!`
Examples: `-p help`, `!help`
`help` shows this page
`idea: <text>` Records a suggestion in your name. You can see it with @pybot getideas
`getideas <username>` Lists ideas from user. *username* can be omitted to get your own ideas.
`delidea <n>` Deletes the idea with the number *n*
`clearideas` Deletes ALL your ideas
`whatis <query>` Gets a summary of the Wikipedia page for <query>
`machineinfo` Gets server name and operating system
`splitchannel` Keeps future ideas from this channel separate from others, only accessible from the channel in which this command is run.
`mergechannel` Makes ideas from channel available to all channels.
`setresponse \"<response>\" for \"<call>\"` Has me respond with *response* whenever your message matches *call*
`getresponses` Gets all automated responses
`delresponse <call>` Deletes the response for call
`clearresponses` Deletes ALL responses
`getout` Makes pybot leave the server. Only usable by the owner.
To add me to your server visit http://bit.ly/getpybot
"""
print("Setup finished")
def readFile(channel):
    """Load a channel's data dictionary from disk.

    Prefers the channel-specific file (created by `splitchannel`), falls
    back to the server-wide file, and creates a fresh server file when
    neither can be read. Each line has the form "key|python-literal".
    """
    d = {"responses": {}}
    try:
        #See if the channel has its own file
        with open("data/" + channel.id + ".txt", 'r') as f:
            for line in f:
                #Grab the keys and values
                (key, val) = line.split("|", maxsplit = 1)
                # literal_eval safely parses the stored Python literal;
                # unlike eval it cannot execute arbitrary code
                d[key] = ast.literal_eval(val)
    except Exception:
        try:
            #Open the main file if the channel is not separate
            with open("data/" + channel.server.id + ".txt", 'r') as f:
                for line in f:
                    (key, val) = line.split("|", maxsplit = 1)
                    d[key] = ast.literal_eval(val)
        except Exception:
            #Create a new file if <server>.txt doesn't exist
            #Or if an error happens. (99% sure this is fixed)
            with open("data/" + channel.server.id + ".txt", 'w') as f:
                f.write("responses|{}")
    return d
def readStocks(user):
    """Load a user's stock-trading data from disk.

    Returns the dict parsed from ``data/stocks/<user.id>.txt``; when the
    file is missing or unreadable, a fresh one holding the starting cash
    balance is created and an empty dict is returned.
    """
    d = {}
    try:
        #See if the user has data
        with open("data/stocks/" + user.id + ".txt", 'r') as f:
            for line in f:
                #Grab the keys and values
                (key, val) = line.split("|", maxsplit = 1)
                # literal_eval safely parses the stored Python literal
                d[key] = ast.literal_eval(val)
    except Exception:
        #Create a new file if the data doesn't exist
        #Or if an error happens. (99% sure this is fixed)
        with open("data/stocks/" + user.id + ".txt", 'w') as f:
            f.write("cash|10000")
    return d
def writeDict(d, channel):
    """Persist a data dictionary to the channel's backing file.

    Each entry is serialised as "key|str(value)" on its own line. The
    channel-specific file is preferred; when it cannot be opened, the
    server-wide file is written instead.
    """
    # build the whole payload up front instead of concatenating in a loop
    s = "".join(key + "|" + str(val) + "\n" for key, val in d.items())
    try:
        out = open("data/" + channel.id + ".txt", 'w')
    except OSError:
        # no channel-specific file location: use the server-wide file
        out = open("data/" + channel.server.id + ".txt", 'w')
    #Overwrite the file with the new content
    with out:
        out.write(s)
@asyncio.coroutine
def newIdea(text, user, channel):
    """Record an idea for *user* in the channel's data file.

    A backup of the data is taken first so the file can be restored if
    anything wipes it during the write.
    """
    # backup copy, taken before any mutation, in case the write corrupts
    # the file (None until readFile has succeeded)
    dProtect = None
    try:
        #Grab the dictionary from the file
        d = readFile(channel)
        dProtect = readFile(channel)
        try:
            #Add the idea to the user's list of ideas
            d[user.id].append(text)
        except KeyError:
            #Create the user in the dictionary if they don't exist
            d[user.id] = [text]
        writeDict(d, channel)
        # NOTE(review): readFile always seeds a 'responses' key, so this
        # check can seemingly never be true -- verify intent
        if readFile(channel) == {}:
            #Revert the file if an error wiped it
            yield from debug("Something wiped the file. Restoring to previous version...")
            writeDict(dProtect, channel)
    except Exception:
        yield from client.send_message(channel, ":warning: Sorry, I couldn't add your idea. Please try again!")
        # bug fix: writeDict requires the channel argument, and dProtect
        # may be unset if readFile itself failed
        if dProtect is not None:
            writeDict(dProtect, channel)
    else:
        yield from client.send_message(channel, "{}'s idea has been added.".format(user.mention))
# WIP function for the fetching of stock prices. Horribly inefficient
# This is basically a hacked-together Google Finance API
# I used RegEx instead of an HTML parser because pip is broken for me
@asyncio.coroutine
def getPrice(ticker, channel):
    """Scrape Google Finance for *ticker* and post price and daily change.

    NOTE(review): this scrapes raw HTML with regexes, so any markup change
    on Google's side (or a network failure) lands in the bare 'except'
    below and is reported to the user as "stock not found".
    """
    try:
        financepage = requests.get('https://www.google.com/finance?q=' + ticker).text
        # current price: the digits inside the <span class="pr"> element
        regex = re.search('<span class="pr">[\s]+(<span [a-z\_0-9"=\s]+>)+([0-9\.]+)<', financepage)
        price = regex.group(2)
        # company name, taken from the page title
        regex = re.search('<title>([A-Za-z\s\-]+)', financepage)
        title = regex.group(1)
        # daily percentage change, e.g. "(-1.23%)"
        regex = re.search('\(([\-0-9\.]+%)\)', financepage)
        change = regex.group(1)
    except:
        yield from client.send_message(channel, ":warning: Stock not found! \n ```Make sure you're using the correct stock symbol. If it still doesn't work, create an issue on the git repo.```")
    else:
        # a leading '-' on the change means the stock is down today
        if change[0] == "-":
            tickerstring = ":chart_with_downwards_trend: **{}** - ${} - (Down {} Today)".format(title, price, change[1:])
        else:
            tickerstring = ":chart_with_upwards_trend: **{}** - ${} - (Up {} Today)".format(title, price, change)
        yield from client.send_message(channel, tickerstring)
@asyncio.coroutine
def getIdeas(name, channel):
    """Post a numbered list of the ideas recorded for the member *name*."""
    member = channel.server.get_member_named(name)
    if not member:
        yield from client.send_message(channel, ":warning: Name not found! Please try again!")
        return
    # look the member up in the stored idea dictionary
    data = readFile(channel)
    ideas = data.get(member.id, [])
    if not ideas:
        yield from client.send_message(channel, ":warning: " + member.mention + " has not entered any ideas yet!")
        return
    # build a numbered listing, one idea per line
    lines = [":open_file_folder: Ideas for {}:".format(member.mention)]
    for n, idea in enumerate(ideas, start=1):
        lines.append("`" + str(n) + ":` " + idea)
    yield from client.send_message(channel, "\n".join(lines))
@asyncio.coroutine
def delIdea(num, author, channel):
    """Delete idea number *num* (1-based) belonging to *author* (a user id)."""
    try:
        #Makes sure "1" points to d[userID][0]
        num = int(num) - 1
        if num < 0:
            num = num + 1
        #Grab the dictionary from the text file
        d = readFile(channel)
        #Make sure the number is not greater than the amount of elements
        if (num + 1) > len(d[author]) and len(d[author]) > 0:
            # bug fix: this send_message was missing its 'yield from', so
            # the warning was never actually sent
            yield from client.send_message(channel, ":warning: That's more ideas than you have! You currently have " + str(len(d[author])) + " ideas entered.")
        elif len(d[author]) == 0:
            yield from client.send_message(channel, ":warning: You don't have any ideas to delete!")
        else:
            #Get rid of the element and persist the change
            e = d[author].pop(num)
            writeDict(d, channel)
            yield from client.send_message(channel, ":wastebasket: Idea `" + e.replace("`", "'") + "` deleted.")
    except Exception:
        # int() failure, unknown author, bad index, etc.
        yield from client.send_message(channel, ":warning: Invalid number. Please try again.")
@asyncio.coroutine
def splitChannel(channel):
    """Give this channel its own data file, separate from the server's."""
    try:
        f = open("data/" + channel.id + ".txt", 'r')
    except OSError:
        # no channel-specific file yet: create one with an empty response
        # set ('with' guarantees the handle is closed even if sending fails)
        with open("data/" + channel.id + ".txt", 'w') as new_file:
            new_file.write("responses|{}")
        yield from client.send_message(channel, "Any new ideas posted here will be kept separate and only accessible in this server.")
    else:
        yield from client.send_message(channel, "Channel already separate. Use `@pybot mergechannel` to merge this channel with the main branch, copying all data.")
        f.close()
@asyncio.coroutine
def mergeChannel(channel):
    """Merge a split channel's data back into the server-wide file.

    Idea lists present in both files are concatenated; the channel file is
    deleted and the server file's responses are kept.
    """
    #Overwrite the file with the new content
    try:
        f = open("data/" + channel.id + ".txt", 'r')
        f.close()
    except:
        # no channel-specific file: nothing to merge
        yield from client.send_message(channel, "Channel uses the main idea database. Use `@pybot splitchannel` to split it.")
    else:
        try:
            server = channel.server.id
            # d1: the channel's own data; d2: the server data that readFile
            # falls back to once the channel file has been removed
            d1 = readFile(channel)
            os.remove("data/" + channel.id + ".txt")
            d2 = readFile(channel)
            s = ""
            #Get the keys to match them with index numbers
            keys = list(d1.keys()) + list(d2.keys())
            for i in range(0, len(keys)):
                if keys[i] == "responses":
                    pass
                #Write each line of the file with the proper syntax
                elif keys[i] in d1:
                    if keys[i] in d2:
                        # user present in both: concatenate the idea lists
                        s = (s + keys[i] + "|" + str(d1[keys[i]] + d2[keys[i]]) + "\n")
                    else:
                        s = (s + keys[i] + "|" + str(d1[keys[i]]) + "\n")
                else:
                    s = (s + keys[i] + "|" + str(d2[keys[i]]) + "\n")
            # keep the server-wide responses, written last without a newline
            s = (s + "responses|" + str(d2["responses"]))
            f2 = open("data/" + server + ".txt", 'w')
            f2.write(s)
            f2.close()
            yield from client.send_message(channel, "Successfully merged channel with the main branch.")
        except AttributeError:
            # channel.server is missing for direct messages
            yield from client.send_message(channel, ":warning: This channel is not attached to a server, likely a Direct Message. Ideas cannot be merged.")
    return()
@asyncio.coroutine
def clearIdeas(author, channel):
    """Delete every idea stored for *author* in this channel."""
    ideas = readFile(channel)
    # missing key and empty list are both "nothing to delete"
    if not ideas.get(author.id):
        yield from client.send_message(channel, ":warning: You don't have any ideas to delete!")
    else:
        ideas[author.id] = []
        writeDict(ideas, channel)
        yield from client.send_message(channel, ":wastebasket: Ideas for {} cleared.".format(author.mention))
def setReminder():
    """Unfinished reminder feature.

    NOTE(review): this function references names that are not defined in
    this scope (``message``, ``sched``, ``remind``) and will raise
    NameError if called -- it appears to be pasted-in WIP code.
    """
    #Unfinished reminder code
    event = message.content.split(" ", maxsplit = 1)[1]
    (name, datetime) = event.split("@", maxsplit = 1)
    name = name.strip()
    dtime = dateparser.parse(datetime)
    s = sched.scheduler()
    s.enterabs(dtime.timestamp(), 1, (remind, message.channel), name)
    s.run()
@asyncio.coroutine
def setResponse(response, call, channel):
    """Register (or replace) an automated *response* for the trigger *call*."""
    d = readFile(channel)
    # pad punctuation with spaces so triggers match tokenised messages
    call = re.sub('([.,!?()])', r' \1 ', call)
    previous = d["responses"].get(call)
    d["responses"][call] = response
    writeDict(d, channel)
    if previous is not None:
        yield from client.send_message(channel, "Changed response from `" + previous + "` to `" + response + "`.")
    else:
        yield from client.send_message(channel, "Added response to list")
@asyncio.coroutine
def whatIs(user, channel, message):
    """Reply with a one-sentence Wikipedia summary for *message*."""
    searchResults = wikipedia.search(message)
    if not searchResults:
        yield from client.send_message(channel, ":warning: Could not find anything matching your search, {}. Try using different keywords.".format(user.mention))
        return
    # both page() and summary() can raise DisambiguationError
    try:
        page = wikipedia.page(searchResults[0], auto_suggest = True, redirect = True)
        reply = ('{}, here you go:\n**'.format(user.mention) + page.title +
                 "**\nFrom <" + page.url + ">\n" +
                 wikipedia.summary(searchResults[0], sentences=1))
        yield from client.send_message(channel, reply)
    except wikipedia.exceptions.DisambiguationError as e:
        yield from client.send_message(channel, '{}, '.format(user.mention) + str(e))
@asyncio.coroutine
def delResponse(call, channel):
    """Remove the automated response registered for the trigger *call*."""
    d = readFile(channel)
    # pad punctuation with spaces, mirroring how triggers were stored
    call = re.sub('([.,!?()])', r' \1 ', call)
    try:
        del d["responses"][call]
    except KeyError:
        yield from client.send_message(channel, ":warning: I don't respond to that!")
    else:
        writeDict(d, channel)
        yield from client.send_message(channel, ":wastebasket: Removed response.")
@asyncio.coroutine
def getResponses(channel):
    """List every automated call/response pair stored for this channel."""
    d = readFile(channel)
    responses = d["responses"]
    if not responses:
        yield from client.send_message(channel, ":warning: There are no responses here!")
        return
    # one "`response` for `call`" line per stored pair
    lines = ["I respond with:"]
    for call, response in responses.items():
        lines.append("`" + response + "` for `" + call + "`")
    yield from client.send_message(channel, "\n".join(lines))
@asyncio.coroutine
def clearResponses(channel):
    """Delete every automated response stored for this channel."""
    d = readFile(channel)
    if not d["responses"]:
        yield from client.send_message(channel, ":warning: There are no responses here!")
    else:
        # wipe all responses and persist the emptied dictionary
        d["responses"] = {}
        writeDict(d, channel)
        yield from client.send_message(channel, ":wastebasket: Removed responses.")
@asyncio.coroutine
def versionInfo(channel):
    """Report whether this bot's source matches the stable or unstable branch on GitHub."""
    # read our own source; 'with' closes the handle (the old code leaked it)
    with open("bot.py", "r") as sourcetemp:
        currentcode = sourcetemp.read()
    # fetch the published source for both branches
    stablecode = requests.get('https://raw.githubusercontent.com/MishaLarionov/pybot/discord/bot.py')
    unstablecode = requests.get('https://raw.githubusercontent.com/MishaLarionov/pybot/discord-unstable/bot.py')
    #Compare both code samples
    if currentcode == stablecode.text:
        yield from client.send_message(channel, ":thumbsup: This bot instance is up to date with the latest stable build.")
    elif currentcode == unstablecode.text:
        yield from client.send_message(channel, ":alembic: This bot instance is up to date with the latest unstable build.")
    else:
        yield from client.send_message(channel, ":warning: This bot instance does not match any known version. This is probably a test build.")
@asyncio.coroutine
def getChanges(repos, lastCommits):
    """Poll GitHub every two minutes and announce new commits.

    For each watched branch, compares the branch tip against the last
    commit seen; when it has moved, walks the repo's event stream to find
    the push(es) in between and posts a summary to the configured channel.
    Runs forever as a background task.
    """
    while True:
        #Make sure we have the freshest data, but tell the server to give us nothing if our data is already fresh
        for repo in repos:
            repo.refresh(conditional=True)
            for b in repo.iter_branches():
                #If anything actually happened
                if b.commit != lastCommits[repo.id][b.name]:
                    events = repo.iter_events()
                    #Go through everything that ever happened on the repo to see what's new
                    for i in events:
                        #If we pushed some changes and the old commit came in just before this change
                        if i.type == "PushEvent" and i.payload["before"] == lastCommits[repo.id][b.name].sha:
                            #Draft the beginning of the message
                            m = "[" + repo.name + "] " + str(i.payload["size"]) + " new commit" + ("s" if i.payload["size"] != 1 else "") + " pushed by " + i.actor.login + " <" + repo.compare_commits(i.payload["before"], i.payload["head"]).html_url + ">:\n"
                            for c in i.payload["commits"]:
                                #Describe each new commit
                                m += "`" + c["sha"][:7] + "` " + c["message"] + " - " + c["author"]["name"] + " - <" + repo.commit(c["sha"]).html_url + ">\n"
                            yield from client.send_message(client.get_channel(cfg.GITHUBCHANNEL), m)
                            #Update the last seen commit for later
                            lastCommits[repo.id][b.name] = repo.commit(i.payload["head"])
                        #If this is the last event which occured between updates
                        # NOTE(review): hard-codes the 'discord-unstable' branch --
                        # verify this is intended for repos with other default branches
                        if i.payload["head"] == repo.commit("discord-unstable").sha:
                            break
        yield from asyncio.sleep(120)
@asyncio.coroutine
def processCommand(rawstring, channel, user, message):
    """Dispatch a user's bot command.

    Args:
        rawstring: The command text with the bot prefix/mention already removed.
        channel: Channel the command was issued in (replies go here).
        user: The member who issued the command.
        message: The original discord Message object.
    """
    #Process the user's commands
    if rawstring == "help":
        yield from client.delete_message(message)
        yield from client.send_message(user, helpString)
    elif " " in rawstring:
        yield from client.send_typing(message.channel)
        # Split into the command word and its argument string.
        (cmd, message) = rawstring.split(" ", maxsplit=1)
        cmd = cmd.lower()
        if cmd == "hello":
            yield from client.send_message(channel, 'Hello, {}!'.format(user.mention))
        elif cmd == "idea" or cmd == "idea:":
            yield from newIdea(message, user, channel)
        elif cmd == "getideas":
            yield from getIdeas(message, channel)
        elif cmd == "delidea":
            yield from delIdea(message, user.id, channel)
        elif cmd == "clearideas":
            yield from clearIdeas(user, channel)
        elif cmd == "whatis":
            yield from whatIs(user, channel, message)
        elif cmd == "what":
            # Strip the verb ("is"/"are"/"was"/"were") before the lookup.
            if message.startswith("is "):
                yield from whatIs(user, channel, message[3:])
            elif message.startswith("are "):
                yield from whatIs(user, channel, message[4:])
            elif message.startswith("was "):
                yield from whatIs(user, channel, message[4:])
            elif message.startswith("were "):
                yield from whatIs(user, channel, message[5:])
            else:
                yield from client.send_message(channel, ":warning: Unknown command. `@pybot help` for a list of commands.")
        elif cmd == "what's":
            yield from whatIs(user, channel, message)
        elif cmd == "remind":
            #Code goes here someday
            print("Reminder code doesn't exist yet, please create some.")
            yield from client.send_message(channel, ":warning: Nicholas (@ncarr) forgot to write this code.")
        elif cmd == "setresponse":
            # startswith avoids the IndexError the old `[0]` raised on an
            # empty argument string.
            stripped = message.strip()
            if stripped.startswith("\""):
                try:
                    yield from setResponse(message.split("\"")[1], message.split("\"")[3], channel)
                except IndexError:
                    yield from client.send_message(channel, ":warning: Improper syntax! Please use either single or double quotes for both call and response!")
            elif stripped.startswith("'"):
                try:
                    yield from setResponse(message.split("'")[1], message.split("'")[3], channel)
                except IndexError:
                    yield from client.send_message(channel, ":warning: Improper syntax! Please use either single or double quotes for both call and response!")
            else:
                # Previously a silent no-op; tell the user what went wrong.
                yield from client.send_message(channel, ":warning: Improper syntax! Please use either single or double quotes for both call and response!")
        elif cmd == "delresponse":
            yield from delResponse(message, channel)
        else:
            yield from client.send_message(channel, ":warning: Unknown command. `@pybot help` for a list of commands.")
    else:
        yield from client.send_typing(message.channel)
        rawstring = rawstring.lower()
        if rawstring == "hello":
            yield from client.send_message(channel, 'Hello, {}!'.format(user.mention))
        elif rawstring == "die" or rawstring == "kys":
            print(user.name + " tried to kill the bot!")
            #Find the people that are allowed to kill the bot
            if user.id in cfg.KILLERIDS:
                yield from client.send_message(channel, ":robot::gun:")
                # Bug fix: time.sleep(1) blocked the whole event loop; use the
                # non-blocking asyncio sleep inside a coroutine.
                yield from asyncio.sleep(1)
                yield from client.send_message(channel, ":boom::gun:")
                print(user.name + " has killed me! Avenge me!")
                sys.exit()
            else:
                yield from client.send_message(channel, ":warning: You don't have permission to kill me! I see you don't like me, perhaps you don't understand my commands. `@pybot help` to learn more. If you really hate me, get your channel owner to send `@pybot getout`.")
        elif rawstring == "clearideas":
            yield from clearIdeas(user, channel)
        elif rawstring == "machineinfo":
            yield from client.send_message(channel, platform.node() + " " + platform.platform())
        elif rawstring == "getideas":
            yield from getIdeas(user.name, channel)
        elif rawstring == "getresponses":
            yield from getResponses(channel)
        elif rawstring == "clearresponses":
            yield from clearResponses(channel)
        elif rawstring == "splitchannel":
            yield from splitChannel(channel)
        elif rawstring == "mergechannel":
            yield from mergeChannel(channel)
        elif rawstring == "versioninfo":
            yield from versionInfo(channel)
        elif rawstring == "getout":
            # Only the server owner may eject the bot.
            if user == channel.server.owner:
                yield from client.send_message(channel, "Alright, I'll leave your server.. :cry:\n(http://bit.ly/getpybot to re-add me)")
                yield from client.leave_server(channel.server)
            else:
                yield from client.send_message(channel, ":warning: Only the server owner can make me leave!")
        else:
            yield from client.send_message(channel, ":warning: Unknown command. `-p help` for proper commands.")
@asyncio.coroutine
def remind(name, channel):
    """Echo a reminder request back to the channel.

    NOTE: actual reminder scheduling is not implemented yet.
    """
    # Bug fix: the original string was missing the space after "to",
    # producing e.g. "remind you tobuy milk".
    yield from client.send_message(channel, "You asked me to remind you to " + name)
@asyncio.coroutine
def debug(text):
    """Send *text* to the configured debug channel when debug mode is enabled."""
    if cfg.DEBUGMODE:
        # Renamed the local (was `debug`) so it no longer shadows this
        # coroutine's own name.
        debug_channel = client.get_channel(cfg.DEBUGCH)
        yield from client.send_message(debug_channel, text)
@asyncio.coroutine
def processResponse(message):
    """Reply with a stored canned response if the message contains a trigger word.

    Returns:
        True when a canned response was sent, False otherwise.
    """
    # Hoisted: the original called readFile() twice per loop iteration.
    responses = readFile(message.channel)["responses"]
    # Pad with spaces and space out punctuation so only whole words match
    # and trailing punctuation doesn't stop matches.
    padded = re.sub('([.,!?()])', r' \1 ', " " + message.content + " ")
    for word in responses:
        if (" " + word + " ") in padded:
            yield from client.send_message(message.channel, responses[word])
            return True
    return False
@client.event
@asyncio.coroutine
def on_message(message):
    """Log every incoming message and route it to the command/response handlers."""
    try:
        print(time.strftime("%Y-%m-%d %H:%M:%S") + ": " + message.author.name + " says: " + message.content)
    except Exception:
        # The console encoding may not be able to represent emojis; logging is
        # best-effort. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        print(time.strftime("%Y-%m-%d %H:%M:%S") + ": " + message.author.name + " sent something containing emojis.")
    # Never react to our own messages.
    if message.author == client.user:
        return
    if not message.author.bot:
        # "$TICKER" stock price lookup (1-5 characters after the $).
        if message.content.startswith("$") and len(message.content) > 1 and len(message.content) <= 6:
            yield from getPrice(message.content[1:], message.channel)
        if message.content.startswith("!") and len(message.content) > 1:
            yield from processCommand(message.content[1:], message.channel, message.author, message)
        # NOTE(review): the 22/7/3 slice offsets assume a fixed-length mention
        # string and bot name -- verify against the account the bot runs as.
        if message.content.startswith("<@" + client.user.id + ">") and len(message.content) > 22:
            yield from processCommand(str.strip(message.content[22:]), message.channel, message.author, message)
        if message.content.startswith("@" + client.user.name) and len(message.content) > 7:
            yield from processCommand(str.strip(message.content[7:]), message.channel, message.author, message)
        elif message.content.startswith("-p") and len(message.content) > 3:
            yield from processCommand(str.strip(message.content[3:]), message.channel, message.author, message)
        elif message.content == "<@" + client.user.id + ">" or message.content == "-p" or message.content == "@" + client.user.name:
            # A bare mention/prefix just gets a greeting.
            yield from client.send_message(message.channel, 'Hello {}!'.format(message.author.mention))
        elif message.content == "sudo rm -rf":
            yield from processCommand("clearideas", message.channel, message.author, message)
        else:
            yield from processResponse(message)
@client.event
@asyncio.coroutine
def on_ready():
    """Log the connection and start polling GitHub repos when configured."""
    print(time.strftime("%Y-%m-%d %H:%M:%S") + ': Connected to Discord')
    # hasattr replaces the original bare try/except, so real errors raised
    # inside getChanges still propagate instead of being swallowed.
    if hasattr(cfg, "GITHUBCHANNEL") and hasattr(cfg, "REPOS"):
        yield from getChanges(repos, lastCommits)
client.run(cfg.TOKEN)  # Blocks: starts the Discord event loop with the configured bot token.
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.kms_v1.services.ekm_service import pagers
from google.cloud.kms_v1.types import ekm_service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import EkmServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import EkmServiceGrpcTransport
from .transports.grpc_asyncio import EkmServiceGrpcAsyncIOTransport
class EkmServiceClientMeta(type):
    """Metaclass for the EkmService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    # Registry of transport label -> transport class; insertion order matters
    # because the first entry is the default.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[EkmServiceTransport]]
    _transport_registry["grpc"] = EkmServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = EkmServiceGrpcAsyncIOTransport
    def get_transport_class(cls, label: Optional[str] = None,) -> Type[EkmServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class EkmServiceClient(metaclass=EkmServiceClientMeta):
"""Google Cloud Key Management EKM Service
Manages external cryptographic keys and operations using those keys.
Implements a REST model with the following objects:
- [EkmConnection][google.cloud.kms.v1.EkmConnection]
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "cloudkms.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EkmServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EkmServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> EkmServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            EkmServiceTransport: The transport used by the client
                instance.
        """
        # Set in __init__: either the caller-supplied transport instance or
        # one built from client_options.
        return self._transport
@staticmethod
def ekm_connection_path(project: str, location: str, ekm_connection: str,) -> str:
"""Returns a fully-qualified ekm_connection string."""
return "projects/{project}/locations/{location}/ekmConnections/{ekm_connection}".format(
project=project, location=location, ekm_connection=ekm_connection,
)
@staticmethod
def parse_ekm_connection_path(path: str) -> Dict[str, str]:
"""Parses a ekm_connection path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/ekmConnections/(?P<ekm_connection>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def service_path(project: str, location: str, namespace: str, service: str,) -> str:
"""Returns a fully-qualified service string."""
return "projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}".format(
project=project, location=location, namespace=namespace, service=service,
)
@staticmethod
def parse_service_path(path: str) -> Dict[str, str]:
"""Parses a service path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/namespaces/(?P<namespace>.+?)/services/(?P<service>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Validate the two environment switches before acting on them.
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )
        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            # Explicit option wins; otherwise fall back to the platform default.
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT
        return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, EkmServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the ekm service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, EkmServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Coerce dict-style options into a ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Resolve endpoint + mTLS cert source from options and environment.
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        # getattr: older client_options versions may not expose `api_key`.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, EkmServiceTransport):
            # transport is a EkmServiceTransport instance: it already owns its
            # credentials and scopes, so passing them here is an error.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Imported lazily: get_api_key_credentials only exists in newer
            # versions of google-auth.
            import google.auth._default  # type: ignore
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            # `transport` is a label (or None) here; the metaclass picks the class.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
    def list_ekm_connections(
        self,
        request: Optional[Union[ekm_service.ListEkmConnectionsRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListEkmConnectionsPager:
        r"""Lists [EkmConnections][google.cloud.kms.v1.EkmConnection].

        .. code-block:: python

            from google.cloud import kms_v1

            def sample_list_ekm_connections():
                # Create a client
                client = kms_v1.EkmServiceClient()

                # Initialize request argument(s)
                request = kms_v1.ListEkmConnectionsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_ekm_connections(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.kms_v1.types.ListEkmConnectionsRequest, dict]):
                The request object. Request message for
                [KeyManagementService.ListEkmConnections][].
            parent (str):
                Required. The resource name of the location associated
                with the
                [EkmConnections][google.cloud.kms.v1.EkmConnection] to
                list, in the format ``projects/*/locations/*``.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.kms_v1.services.ekm_service.pagers.ListEkmConnectionsPager:
                Response message for
                [KeyManagementService.ListEkmConnections][].

                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a ekm_service.ListEkmConnectionsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, ekm_service.ListEkmConnectionsRequest):
            request = ekm_service.ListEkmConnectionsRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_ekm_connections]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListEkmConnectionsPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )
        # Done; return the response.
        return response
    def get_ekm_connection(
        self,
        request: Optional[Union[ekm_service.GetEkmConnectionRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> ekm_service.EkmConnection:
        r"""Returns metadata for a given
        [EkmConnection][google.cloud.kms.v1.EkmConnection].

        .. code-block:: python

            from google.cloud import kms_v1

            def sample_get_ekm_connection():
                # Create a client
                client = kms_v1.EkmServiceClient()

                # Initialize request argument(s)
                request = kms_v1.GetEkmConnectionRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_ekm_connection(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.kms_v1.types.GetEkmConnectionRequest, dict]):
                The request object. Request message for
                [KeyManagementService.GetEkmConnection][].
            name (str):
                Required. The
                [name][google.cloud.kms.v1.EkmConnection.name] of the
                [EkmConnection][google.cloud.kms.v1.EkmConnection] to
                get.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.kms_v1.types.EkmConnection:
                An [EkmConnection][google.cloud.kms.v1.EkmConnection] represents an
                individual EKM connection. It can be used for
                creating [CryptoKeys][google.cloud.kms.v1.CryptoKey]
                and
                [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]
                with a
                [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel]
                of
                [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC],
                as well as performing cryptographic operations using
                keys created within the
                [EkmConnection][google.cloud.kms.v1.EkmConnection].
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a ekm_service.GetEkmConnectionRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, ekm_service.GetEkmConnectionRequest):
            request = ekm_service.GetEkmConnectionRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_ekm_connection]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    def create_ekm_connection(
        self,
        request: Optional[Union[ekm_service.CreateEkmConnectionRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        ekm_connection_id: Optional[str] = None,
        ekm_connection: Optional[ekm_service.EkmConnection] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> ekm_service.EkmConnection:
        r"""Creates a new [EkmConnection][google.cloud.kms.v1.EkmConnection]
        in a given Project and Location.

        .. code-block:: python

            from google.cloud import kms_v1

            def sample_create_ekm_connection():
                # Create a client
                client = kms_v1.EkmServiceClient()

                # Initialize request argument(s)
                request = kms_v1.CreateEkmConnectionRequest(
                    parent="parent_value",
                    ekm_connection_id="ekm_connection_id_value",
                )

                # Make the request
                response = client.create_ekm_connection(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.kms_v1.types.CreateEkmConnectionRequest, dict]):
                The request object. Request message for
                [KeyManagementService.CreateEkmConnection][].
            parent (str):
                Required. The resource name of the location associated
                with the
                [EkmConnection][google.cloud.kms.v1.EkmConnection], in
                the format ``projects/*/locations/*``.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            ekm_connection_id (str):
                Required. It must be unique within a location and match
                the regular expression ``[a-zA-Z0-9_-]{1,63}``.

                This corresponds to the ``ekm_connection_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            ekm_connection (google.cloud.kms_v1.types.EkmConnection):
                Required. An
                [EkmConnection][google.cloud.kms.v1.EkmConnection] with
                initial field values.

                This corresponds to the ``ekm_connection`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.kms_v1.types.EkmConnection:
                An [EkmConnection][google.cloud.kms.v1.EkmConnection] represents an
                individual EKM connection. It can be used for
                creating [CryptoKeys][google.cloud.kms.v1.CryptoKey]
                and
                [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]
                with a
                [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel]
                of
                [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC],
                as well as performing cryptographic operations using
                keys created within the
                [EkmConnection][google.cloud.kms.v1.EkmConnection].
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, ekm_connection_id, ekm_connection])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a ekm_service.CreateEkmConnectionRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, ekm_service.CreateEkmConnectionRequest):
            request = ekm_service.CreateEkmConnectionRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if ekm_connection_id is not None:
                request.ekm_connection_id = ekm_connection_id
            if ekm_connection is not None:
                request.ekm_connection = ekm_connection
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_ekm_connection]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
def update_ekm_connection(
self,
request: Union[ekm_service.UpdateEkmConnectionRequest, dict] = None,
*,
ekm_connection: ekm_service.EkmConnection = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ekm_service.EkmConnection:
r"""Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s
metadata.
.. code-block:: python
from google.cloud import kms_v1
def sample_update_ekm_connection():
# Create a client
client = kms_v1.EkmServiceClient()
# Initialize request argument(s)
request = kms_v1.UpdateEkmConnectionRequest(
)
# Make the request
response = client.update_ekm_connection(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.kms_v1.types.UpdateEkmConnectionRequest, dict]):
The request object. Request message for
[KeyManagementService.UpdateEkmConnection][].
ekm_connection (google.cloud.kms_v1.types.EkmConnection):
Required.
[EkmConnection][google.cloud.kms.v1.EkmConnection] with
updated values.
This corresponds to the ``ekm_connection`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. List of fields to be
updated in this request.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.kms_v1.types.EkmConnection:
An [EkmConnection][google.cloud.kms.v1.EkmConnection] represents an
individual EKM connection. It can be used for
creating [CryptoKeys][google.cloud.kms.v1.CryptoKey]
and
[CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]
with a
[ProtectionLevel][google.cloud.kms.v1.ProtectionLevel]
of
[EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC],
as well as performing cryptographic operations using
keys created within the
[EkmConnection][google.cloud.kms.v1.EkmConnection].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([ekm_connection, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a ekm_service.UpdateEkmConnectionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ekm_service.UpdateEkmConnectionRequest):
request = ekm_service.UpdateEkmConnectionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if ekm_connection is not None:
request.ekm_connection = ekm_connection
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_ekm_connection]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("ekm_connection.name", request.ekm_connection.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
    def __enter__(self):
        # Context-manager entry: no resources are acquired here; the client
        # itself is returned so it can be bound with `with ... as client:`.
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # The exception details (type/value/traceback) are deliberately
        # ignored; since nothing truthy is returned, any in-flight exception
        # propagates normally after the transport is closed.
        self.transport.close()
def set_iam_policy(
self,
request: iam_policy_pb2.SetIamPolicyRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the IAM access control policy on the specified function.
Replaces any existing policy.
Args:
request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
The request object. Request message for `SetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**::
bindings:
- members:
- user:mike@example.com
- group:admins@example.com
- domain:google.com
- serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin
- members:
- user:eve@example.com
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.set_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_iam_policy(
self,
request: iam_policy_pb2.GetIamPolicyRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does not have a
policy set.
Args:
request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
The request object. Request message for `GetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if
any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**::
bindings:
- members:
- user:mike@example.com
- group:admins@example.com
- domain:google.com
- serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin
- members:
- user:eve@example.com
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: iam_policy_pb2.TestIamPermissionsRequest = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Tests the specified IAM permissions against the IAM access control
policy for a function.
If the function does not exist, this will return an empty set
of permissions, not a NOT_FOUND error.
Args:
request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
The request object. Request message for
`TestIamPermissions` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.iam_policy_pb2.TestIamPermissionsResponse:
Response message for ``TestIamPermissions`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
# Attach the installed google-cloud-kms package version to outgoing request
# headers for telemetry; fall back to a blank ClientInfo when the
# distribution is not installed (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-kms",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API surface of this module.
__all__ = ("EkmServiceClient",)
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through heapcheck_test.py.
Most of this code is copied from ../valgrind/chrome_tests.py.
TODO(glider): put common functions to a standalone module.
'''
import glob
import logging
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import heapcheck_test
class TestNotFound(Exception):
  """Raised when the requested test name is not in the known test list."""
def Dir2IsNewer(dir1, dir2):
  """Returns True iff |dir2| exists and was modified more recently than |dir1|.

  A missing or non-directory |dir2| never wins; a missing or non-directory
  |dir1| always loses to an existing |dir2|.
  """
  if dir2 is None or not os.path.isdir(dir2):
    return False
  if dir1 is None or not os.path.isdir(dir1):
    return True
  # Compare modification times (seconds) of the two directories.
  mtime1 = os.stat(dir1)[stat.ST_MTIME]
  mtime2 = os.stat(dir2)[stat.ST_MTIME]
  return mtime2 > mtime1
def FindNewestDir(dirs):
  """Returns the most recently modified directory in |dirs|, or None.

  Entries that are missing or not directories are effectively skipped by
  the Dir2IsNewer comparison.
  """
  newest_dir = None
  # |candidate| instead of |dir| to avoid shadowing the builtin.
  for candidate in dirs:
    if Dir2IsNewer(newest_dir, candidate):
      newest_dir = candidate
  return newest_dir
def File2IsNewer(file1, file2):
  """Returns True iff |file2| exists and was modified more recently than |file1|.

  A missing |file2| never wins; a missing |file1| always loses to an
  existing |file2|.
  """
  second_exists = file2 is not None and os.path.isfile(file2)
  if not second_exists:
    return False
  first_exists = file1 is not None and os.path.isfile(file1)
  if not first_exists:
    return True
  # Both exist: the later modification time wins.
  return os.stat(file2)[stat.ST_MTIME] > os.stat(file1)[stat.ST_MTIME]
def FindDirContainingNewestFile(dirs, file):
  """Searches for the directory containing the newest copy of |file|.

  Args:
    dirs: A list of paths to the directories to search among.
    file: A string containing the file name to search.

  Returns:
    The string representing the directory containing the newest copy of
    |file|.

  Raises:
    IOError: |file| was not found.
  """
  newest_dir = None
  newest_file = None
  # |candidate_dir| instead of |dir| to avoid shadowing the builtin.
  # (The |file| parameter name is kept for interface compatibility.)
  for candidate_dir in dirs:
    the_file = os.path.join(candidate_dir, file)
    if File2IsNewer(newest_file, the_file):
      newest_dir = candidate_dir
      newest_file = the_file
  if newest_dir is None:
    raise IOError("cannot find file %s anywhere, have you built it?" % file)
  return newest_dir
class ChromeTests(object):
  '''This class is derived from the chrome_tests.py file in ../purify/.
  '''
  def __init__(self, options, args, test):
    # The known list of tests.
    # Recognise the original abbreviations as well as full executable names.
    self._test_list = {
      "base": self.TestBase, "base_unittests": self.TestBase,
      "browser": self.TestBrowser, "browser_tests": self.TestBrowser,
      "googleurl": self.TestGURL, "googleurl_unittests": self.TestGURL,
      "courgette": self.TestCourgette,
      "courgette_unittests": self.TestCourgette,
      "ipc": self.TestIpc, "ipc_tests": self.TestIpc,
      "layout": self.TestLayout, "layout_tests": self.TestLayout,
      "media": self.TestMedia, "media_unittests": self.TestMedia,
      "net": self.TestNet, "net_unittests": self.TestNet,
      "printing": self.TestPrinting, "printing_unittests": self.TestPrinting,
      "remoting": self.TestRemoting, "remoting_unittests": self.TestRemoting,
      "startup": self.TestStartup, "startup_tests": self.TestStartup,
      "sync": self.TestSync, "sync_unit_tests": self.TestSync,
      "test_shell": self.TestTestShell, "test_shell_tests": self.TestTestShell,
      "ui": self.TestUI, "ui_tests": self.TestUI,
      "unit": self.TestUnit, "unit_tests": self.TestUnit,
      "app": self.TestApp, "app_unittests": self.TestApp,
      "ui_unit": self.TestUIUnit, "ui_unittests": self.TestUIUnit,
      "gfx": self.TestGfx, "gfx_unittests": self.TestGfx,
    }
    if test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)
    self._options = options
    self._args = args
    self._test = test
    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives). We assume that the script dir is in tools/heapcheck/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    heapcheck_test_script = os.path.join(script_dir, "heapcheck_test.py")
    self._command_preamble = [heapcheck_test_script]
  def _DefaultCommand(self, module, exe=None, heapcheck_test_args=None):
    '''Generates the default command array that most tests will use.

    Side effects: sets self._data_dirs, and fills in
    self._options.build_dir when it was not supplied on the command line.

    Args:
      module: The module name (corresponds to the dir in src/ where the test
              data resides).
      exe: The executable name.
      heapcheck_test_args: additional arguments to append to the command line.

    Returns:
      A list of strings with the command to run the test.
    '''
    module_dir = os.path.join(self._source_dir, module)
    # We need multiple data dirs, the current script directory and a module
    # specific one. The global suppression file lives in our directory, and the
    # module specific suppression file lives with the module.
    self._data_dirs = [path_utils.ScriptDir()]
    if module == "chrome":
      # Unfortunately, not all modules have the same directory structure.
      self._data_dirs.append(os.path.join(module_dir, "test", "data",
                                          "heapcheck"))
    else:
      self._data_dirs.append(os.path.join(module_dir, "data", "heapcheck"))
    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
      ]
      # When we know the executable name, pick the build dir that has the
      # newest copy of it; otherwise fall back to the newest build dir.
      if exe:
        self._options.build_dir = FindDirContainingNewestFile(dirs, exe)
      else:
        self._options.build_dir = FindNewestDir(dirs)
    cmd = list(self._command_preamble)
    if heapcheck_test_args != None:
      for arg in heapcheck_test_args:
        cmd.append(arg)
    if exe:
      cmd.append(os.path.join(self._options.build_dir, exe))
    # Heapcheck runs tests slowly, so slow tests hurt more; show elapased time
    # so we can find the slowpokes.
    cmd.append("--gtest_print_time")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    return cmd
  def Suppressions(self):
    '''Builds the list of available suppressions files.'''
    ret = []
    # Each data dir may contribute a generic and a Linux-specific file.
    for directory in self._data_dirs:
      suppression_file = os.path.join(directory, "suppressions.txt")
      if os.path.exists(suppression_file):
        ret.append(suppression_file)
      suppression_file = os.path.join(directory, "suppressions_linux.txt")
      if os.path.exists(suppression_file):
        ret.append(suppression_file)
    return ret
  def Run(self):
    '''Runs the test specified by command-line argument --test.'''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test]()
  def _ReadGtestFilterFile(self, name, cmd):
    '''Reads files which contain lists of tests to filter out with
    --gtest_filter and appends the command-line option to |cmd|.

    Args:
      name: the test executable name.
      cmd: the test running command line to be modified (in place).
    '''
    filters = []
    for directory in self._data_dirs:
      gtest_filter_files = [
          os.path.join(directory, name + ".gtest.txt"),
          os.path.join(directory, name + ".gtest-heapcheck.txt"),
          os.path.join(directory, name + ".gtest_linux.txt")]
      for filename in gtest_filter_files:
        if os.path.exists(filename):
          logging.info("reading gtest filters from %s" % filename)
          # NOTE(review): |f| is never closed; relies on GC to release the
          # file handle.
          f = open(filename, 'r')
          for line in f.readlines():
            # Skip comment lines ("#" or "//") and blank lines.
            if line.startswith("#") or line.startswith("//") or line.isspace():
              continue
            line = line.rstrip()
            filters.append(line)
    gtest_filter = self._options.gtest_filter
    if len(filters):
      if gtest_filter:
        gtest_filter += ":"
        # Collected filters are exclusions, so make sure there is exactly
        # one "-" separator in the combined filter string.
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)
  def SimpleTest(self, module, name, heapcheck_test_args=None, cmd_args=None):
    '''Builds the command line and runs the specified test.

    Args:
      module: The module name (corresponds to the dir in src/ where the test
              data resides).
      name: The executable name.
      heapcheck_test_args: Additional command line args for heap checker.
      cmd_args: Additional command line args for the test.

    Returns:
      The return code of heapcheck_test.RunTool.
    '''
    cmd = self._DefaultCommand(module, name, heapcheck_test_args)
    supp = self.Suppressions()
    self._ReadGtestFilterFile(name, cmd)
    if cmd_args:
      cmd.extend(["--"])
      cmd.extend(cmd_args)
    # Sets LD_LIBRARY_PATH to the build folder so external libraries can be
    # loaded.
    if (os.getenv("LD_LIBRARY_PATH")):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
    return heapcheck_test.RunTool(cmd, supp, module)
  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")
  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests")
  def TestGURL(self):
    return self.SimpleTest("chrome", "googleurl_unittests")
  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")
  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")
  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")
  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests")
  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")
  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests")
  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")
  def TestStartup(self):
    # We don't need the performance results, we're just looking for pointer
    # errors, so set number of iterations down to the minimum.
    os.putenv("STARTUP_TESTS_NUMCYCLES", "1")
    logging.info("export STARTUP_TESTS_NUMCYCLES=1");
    return self.SimpleTest("chrome", "startup_tests")
  def TestTestShell(self):
    return self.SimpleTest("webkit", "test_shell_tests")
  def TestUnit(self):
    return self.SimpleTest("chrome", "unit_tests")
  def TestApp(self):
    return self.SimpleTest("chrome", "app_unittests")
  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")
  def TestGfx(self):
    return self.SimpleTest("chrome", "gfx_unittests")
  def TestUI(self):
    return self.SimpleTest("chrome", "ui_tests",
                           cmd_args=[
                            "--ui-test-timeout=120000",
                            "--ui-test-action-timeout=80000",
                            "--ui-test-action-max-timeout=180000",
                            "--ui-test-terminate-timeout=60000"])
  def TestLayoutChunk(self, chunk_num, chunk_size):
    '''Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size).

    Wrap around to beginning of list at end. If chunk_size is zero, run all
    tests in the list once. If a text file is given as argument, it is used as
    the list of tests.
    '''
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python heapcheck_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to heapcheck_test.py
    # to avoid heapchecking python.
    # Start by building the heapcheck_test.py commandline.
    cmd = self._DefaultCommand("webkit")
    # Now build script_cmd, the run_webkits_tests.py commandline
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    # NOTE(review): |test_shell| is computed but never used in this method.
    test_shell = os.path.join(self._options.build_dir, "test_shell")
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    # Clear out stale result files from a previous run of this chunk.
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    script_cmd = ["python", script, "--run-singly", "-v",
                  "--noshow-results", "--time-out-ms=200000",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py. We aren't passed it directly,
    # so parse it out of build_dir. run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build_dir / --debug)
    if self._options.build_dir.endswith("Debug"):
      script_cmd.append("--debug");
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._ReadGtestFilterFile("layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)
    supp = self.Suppressions()
    return heapcheck_test.RunTool(cmd, supp, "layout")
  def TestLayout(self):
    '''Runs the layout tests.'''
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run. Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under purify rather
    # than having to run all of them in one shot.
    chunk_size = self._options.num_tests
    if (chunk_size == 0):
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("heapcheck_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      if f:
        # NOTE(review): |str| shadows the builtin; rename on a future edit.
        str = f.read()
        if len(str):
          chunk_num = int(str)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # that we almost guaranteed won't be at the end of the test list)
        if chunk_num > 10000:
          chunk_num = 0
        f.close()
    # Python 2 except syntax; a missing chunk file is expected on first run.
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    ret = self.TestLayoutChunk(chunk_num, chunk_size)
    # Wait until after the test runs to completion to write out the new chunk
    # number. This way, if the bot is killed, we'll start running again from
    # the current chunk rather than skipping it.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them. These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return ret
def _main(_):
  """Command-line entry point: parses options and runs the requested tests.

  Returns 0 on success, or the first non-zero test return code.
  """
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")
  parser.disable_interspersed_args()
  parser.add_option("-b", "--build_dir",
                    help="the location of the output of the compiler output")
  parser.add_option("-t", "--test", action="append",
                    help="which test to run")
  parser.add_option("", "--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("", "--gtest_repeat",
                    help="argument for --gtest_repeat")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  # My machine can do about 120 layout tests/hour in release mode.
  # Let's do 30 minutes worth per run.
  # The CPU is mostly idle, so perhaps we can raise this when
  # we figure out how to run them more efficiently.
  parser.add_option("-n", "--num_tests", default=60, type="int",
                    help="for layout tests: # of subtests per run. 0 for all.")
  options, args = parser.parse_args()
  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()
  if not options.test or not len(options.test):
    parser.error("--test not specified")
  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    # Abort at the first failing suite so its status is reported.
    if ret:
      return ret
  return 0
# Heap checking is Linux-only, so refuse to run on other platforms.
if __name__ == "__main__":
  if sys.platform == 'linux2':
    ret = _main(sys.argv)
  else:
    logging.error("Heap checking works only on Linux at the moment.")
    ret = 1
  sys.exit(ret)
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
from resource_management.libraries.script import Script
from resource_management.libraries.functions.default import default
from resource_management.core.logger import Logger
from resource_management.core.resources.system import File, Directory, Execute, Link
from resource_management.core.source import DownloadSource, InlineTemplate, Template
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.core.utils import PasswordString
from resource_management.core.shell import as_sudo
from resource_management.libraries.functions import solr_cloud_util
from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
from resource_management.core.exceptions import ExecutionFailed
# This file contains functions used for setup/configure of Ranger Admin and Ranger Usersync.
# The design is to mimic what is done by the setup.sh script bundled by Ranger component currently.
def ranger(name=None, upgrade_type=None):
  """
  Dispatches setup/configure for the given Ranger service component.

  parameter name: name of ranger service component
  """
  # Map each known component name to its setup routine; unknown names
  # (or None) are a no-op, matching the original if-chain behavior.
  dispatch = {
    'ranger_admin': setup_ranger_admin,
    'ranger_usersync': setup_usersync,
    'ranger_tagsync': setup_tagsync,
  }
  handler = dispatch.get(name)
  if handler is not None:
    handler(upgrade_type=upgrade_type)
def setup_ranger_admin(upgrade_type=None):
  """
  Configure the Ranger Admin component: verify DB connectivity, lay down
  conf/log/pid directories, generate xml configs (with passwords masked),
  and create service symlinks. Mirrors Ranger's bundled setup.sh.

  :param upgrade_type: rolling/non-rolling upgrade marker; when set, default
                       config templates are re-copied from conf.dist.
  """
  import params

  # If not passed explicitly, derive the upgrade type from the command params.
  if upgrade_type is None:
    upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))

  ranger_home = params.ranger_home
  ranger_conf = params.ranger_conf

  Directory(ranger_conf,
            owner = params.unix_user,
            group = params.unix_group,
            create_parents = True
  )

  copy_jdbc_connector()

  # Ambari's DB connection checker jar is fetched from the ambari-server web root.
  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
    content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
    mode = 0644,
  )

  # Build the classpath for DBConnectionVerification: checker jar + JDBC driver
  # + Ranger's own libs. SQLAnywhere ships its driver inside the Ranger tree.
  cp = format("{check_db_connection_jar}")
  if params.db_flavor.lower() == 'sqla':
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
  else:
    cp = cp + os.pathsep + format("{driver_curl_target}")
  cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

  # `!p` marks the password for masking in logged command lines.
  db_connection_check_command = format(
    "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}")

  env_dict = {}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'LD_LIBRARY_PATH':params.ld_lib_path}

  # Fail fast (with retries) if the Ranger DB is unreachable.
  Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)

  # {ranger_home}/conf is a symlink into the exploded webapp's conf directory.
  Execute(('ln','-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'), format('{ranger_home}/conf')),
    not_if=format("ls {ranger_home}/conf"),
    only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
    sudo=True)

  # On upgrade, refresh the stock templates from conf.dist.
  if upgrade_type is not None:
    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml')
    dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml')
    dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

  Directory(format('{ranger_home}/'),
            owner = params.unix_user,
            group = params.unix_group,
            recursive_ownership = True,
  )

  Directory(params.ranger_pid_dir,
    mode=0755,
    owner = params.unix_user,
    group = params.user_group,
    cd_access = "a",
    create_parents=True
  )

  if params.stack_supports_pid:
    File(format('{ranger_conf}/ranger-admin-env-piddir.sh'),
      content = format("export RANGER_PID_DIR_PATH={ranger_pid_dir}\nexport RANGER_USER={unix_user}"),
      owner = params.unix_user,
      group = params.unix_group,
      mode=0755
    )

  Directory(params.admin_log_dir,
    owner = params.unix_user,
    group = params.unix_group,
    create_parents = True,
    cd_access='a',
    mode=0755
  )

  File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
    content = format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
    owner = params.unix_user,
    group = params.unix_group,
    mode=0755
  )

  # Ensure ownership on the default-site template; restore it from conf.dist
  # if it is missing (e.g. fresh install where the symlink was just created).
  if os.path.isfile(params.ranger_admin_default_file):
    File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group)
  else:
    Logger.warning('Required file {0} does not exist, copying the file to {1} path'.format(params.ranger_admin_default_file, ranger_conf))
    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml')
    dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)
    File(params.ranger_admin_default_file, owner=params.unix_user, group=params.unix_group)

  # Same restore-if-missing treatment for the Spring security context.
  if os.path.isfile(params.security_app_context_file):
    File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group)
  else:
    Logger.warning('Required file {0} does not exist, copying the file to {1} path'.format(params.security_app_context_file, ranger_conf))
    src_file = format('{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml')
    dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)
    File(params.security_app_context_file, owner=params.unix_user, group=params.unix_group)

  # On versioned-stack upgrades, repoint /usr/bin/ranger-admin at the new
  # stack version's service script (delete first if it already exists).
  if upgrade_type is not None and params.stack_supports_config_versioning:
    if os.path.islink('/usr/bin/ranger-admin'):
      Link('/usr/bin/ranger-admin', action="delete")
    Link('/usr/bin/ranger-admin',
         to=format('{ranger_home}/ews/ranger-admin-services.sh'))

  Execute(('ln','-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),'/usr/bin/ranger-admin'),
    not_if=format("ls /usr/bin/ranger-admin"),
    only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
    sudo=True)

  # remove plain-text password from xml configs
  ranger_admin_site_copy = {}
  ranger_admin_site_copy.update(params.config['configurations']['ranger-admin-site'])
  for prop in params.ranger_admin_password_properties:
    if prop in ranger_admin_site_copy:
      ranger_admin_site_copy[prop] = "_"

  XmlConfig("ranger-admin-site.xml",
    conf_dir=ranger_conf,
    configurations=ranger_admin_site_copy,
    configuration_attributes=params.config['configuration_attributes']['ranger-admin-site'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  # JAAS configs live under a tighter (0700) directory.
  Directory(os.path.join(ranger_conf,'ranger_jaas'),
    mode=0700,
    owner=params.unix_user,
    group=params.unix_group,
  )

  if params.stack_supports_ranger_log4j:
    File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
      owner=params.unix_user,
      group=params.unix_group,
      content=InlineTemplate(params.admin_log4j),
      mode=0644
    )

  do_keystore_setup(upgrade_type=upgrade_type)

  create_core_site_xml(ranger_conf)

  # Ranger needs the HA client configs to resolve logical HBase/HDFS URIs
  # when kerberized plugins are enabled.
  if params.stack_supports_ranger_kerberos and params.security_enabled:
    if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
      XmlConfig("hbase-site.xml",
        conf_dir=ranger_conf,
        configurations=params.config['configurations']['hbase-site'],
        configuration_attributes=params.config['configuration_attributes']['hbase-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )

    if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
      XmlConfig("hdfs-site.xml",
        conf_dir=ranger_conf,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )
def setup_ranger_db(stack_version=None):
  """
  Run Ranger's DB bootstrap scripts (dba_script.py / db_setup.py).

  :param stack_version: when set, operate on that stack version's install
                        directory instead of the current one (used on upgrade).
  """
  import params

  ranger_home = params.ranger_home
  version = params.version
  if stack_version is not None:
    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")
    version = stack_version

  copy_jdbc_connector(stack_version=version)

  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
    properties = {'audit_store': params.ranger_audit_source_type},
    owner = params.unix_user,
  )

  env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}

  # User wants us to setup the DB user and DB?
  if params.create_db_dbuser:
    Logger.info('Setting up Ranger DB and DB User')
    # -q: quiet/non-interactive mode.
    dba_setup = format('ambari-python-wrap {ranger_home}/dba_script.py -q')
    Execute(dba_setup,
            environment=env_dict,
            logoutput=True,
            user=params.unix_user,
    )
  else:
    Logger.info('Separate DBA property not set. Assuming Ranger DB and DB User exists!')

  # Always run the schema setup, regardless of who created the DB/user.
  db_setup = format('ambari-python-wrap {ranger_home}/db_setup.py')
  Execute(db_setup,
          environment=env_dict,
          logoutput=True,
          user=params.unix_user,
  )
def setup_java_patch(stack_version=None):
  """
  Apply Ranger's Java patches by invoking db_setup.py with -javapatch.

  :param stack_version: when set, operate on that stack version's install
                        directory instead of the current one (used on upgrade).
  """
  import params

  ranger_home = params.ranger_home
  if stack_version is not None:
    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")

  env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'RANGER_ADMIN_HOME':ranger_home, 'JAVA_HOME':params.java_home, 'LD_LIBRARY_PATH':params.ld_lib_path}

  # Renamed from `setup_java_patch`, which shadowed this function's own name
  # inside its body.
  java_patch_cmd = format('ambari-python-wrap {ranger_home}/db_setup.py -javapatch')
  Execute(java_patch_cmd,
          environment=env_dict,
          logoutput=True,
          user=params.unix_user,
  )
def do_keystore_setup(upgrade_type=None):
  """
  Populate the Ranger JCEKS credential store with the DB, audit-DB,
  LDAP/AD bind, truststore and HTTPS keystore passwords, and keep the
  store file readable only by the ranger user/group (0640).

  :param upgrade_type: unused here; kept for call-site symmetry with the
                       other setup_* routines.
  """
  import params

  ranger_home = params.ranger_home
  cred_lib_path = params.cred_lib_path

  if not is_empty(params.ranger_credential_provider_path):
    # Main Ranger DB password.
    ranger_credential_helper(cred_lib_path, params.ranger_jpa_jdbc_credential_alias, params.ranger_ambari_db_password, params.ranger_credential_provider_path)

    File(params.ranger_credential_provider_path,
      owner = params.unix_user,
      group = params.unix_group,
      mode = 0640
    )

  # Audit DB password is only stored when DB-based auditing is configured.
  if not is_empty(params.ranger_credential_provider_path) and (params.ranger_audit_source_type).lower() == 'db' and not is_empty(params.ranger_ambari_audit_db_password):
    ranger_credential_helper(cred_lib_path, params.ranger_jpa_audit_jdbc_credential_alias, params.ranger_ambari_audit_db_password, params.ranger_credential_provider_path)

    File(params.ranger_credential_provider_path,
      owner = params.unix_user,
      group = params.unix_group,
      mode = 0640
    )

  if params.ranger_auth_method.upper() == "LDAP":
    # Fall back to the usersync bind password unless a dedicated admin bind
    # password was set (i.e. the property is not the unresolved template).
    ranger_ldap_auth_password = params.ranger_usersync_ldap_ldapbindpassword
    if params.ranger_ldap_bind_auth_password != "{{ranger_usersync_ldap_ldapbindpassword}}":
      ranger_ldap_auth_password = params.ranger_ldap_bind_auth_password

    ranger_credential_helper(params.cred_lib_path, params.ranger_ldap_password_alias, ranger_ldap_auth_password, params.ranger_credential_provider_path)

    File(params.ranger_credential_provider_path,
      owner = params.unix_user,
      group = params.unix_group,
      mode = 0640
    )

  if params.ranger_auth_method.upper() == "ACTIVE_DIRECTORY":
    # Same fallback logic as the LDAP branch, for the AD bind password.
    ranger_ad_auth_password = params.ranger_usersync_ldap_ldapbindpassword
    if params.ranger_ad_bind_auth_password != "{{ranger_usersync_ldap_ldapbindpassword}}":
      ranger_ad_auth_password = params.ranger_ad_bind_auth_password

    ranger_credential_helper(params.cred_lib_path, params.ranger_ad_password_alias, ranger_ad_auth_password, params.ranger_credential_provider_path)

    File(params.ranger_credential_provider_path,
      owner = params.unix_user,
      group = params.unix_group,
      mode = 0640
    )

  if params.stack_supports_secure_ssl_password:
    ranger_credential_helper(params.cred_lib_path, params.ranger_truststore_alias, params.truststore_password, params.ranger_credential_provider_path)

    # Keystore password only matters when the admin UI is HTTPS-only.
    if params.https_enabled and not params.http_enabled:
      ranger_credential_helper(params.cred_lib_path, params.ranger_https_keystore_alias, params.https_keystore_password, params.ranger_credential_provider_path)

      File(params.ranger_credential_provider_path,
        owner = params.unix_user,
        group = params.unix_group,
        mode = 0640
      )
def password_validation(password):
  """
  Validate an LDAP/AD bind password.

  :param password: candidate bind password.
  :raises Fail: if the password is blank (after stripping) or contains any
                of the unsupported characters: backslash, backtick, single
                or double quote.
  """
  # Removed an unused `import params` that the original carried.
  if password.strip() == "":
    raise Fail("Blank password is not allowed for Bind user. Please enter valid password.")
  # Character class matches \ ` ' " anywhere in the password.
  if re.search("[\\\`'\"]",password):
    raise Fail("LDAP/AD bind password contains one of the unsupported special characters like \" ' \ `")
  else:
    Logger.info("password validated")
def copy_jdbc_connector(stack_version=None):
  """
  Download the JDBC driver registered with ambari-server and install it into
  Ranger's ews/lib, then record its path (SQL_CONNECTOR_JAR) in
  install.properties.

  :param stack_version: when set, install into that stack version's Ranger
                        home instead of the current one.
  :raises Fail: if no JDBC driver was registered with ambari-server.
  """
  import params

  # The ambari-server download URL ends in "/None" when no driver was
  # registered via `ambari-server setup --jdbc-driver=...`.
  if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
    error_message = format("{db_flavor} jdbc driver cannot be downloaded from {jdk_location}\nPlease run 'ambari-server setup --jdbc-db={db_flavor} --jdbc-driver={{path_to_jdbc}}' on ambari-server host.")
    raise Fail(error_message)

  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
    # Drop any driver left over from a previous stack version before
    # downloading the current one.
    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
      File(params.previous_jdbc_jar, action='delete')

    File(params.downloaded_custom_connector,
      content = DownloadSource(params.driver_curl_source),
      mode = 0644
    )

  ranger_home = params.ranger_home
  if stack_version is not None:
    ranger_home = format("{stack_root}/{stack_version}/ranger-admin")

  driver_curl_target = format("{ranger_home}/ews/lib/{jdbc_jar_name}")

  if params.db_flavor.lower() == 'sqla':
    # SQLAnywhere ships as a tarball: extract it, copy the jar into ews/lib
    # and its native client libraries into the shared jdbc libs directory.
    Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)

    Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(ranger_home, 'ews', 'lib')),
            path=["/bin", "/usr/bin/"],
            sudo=True)

    File(os.path.join(ranger_home, 'ews', 'lib', 'sajdbc4.jar'), mode=0644)

    Directory(params.jdbc_libs_dir,
      cd_access="a",
      create_parents=True)

    # `yes |` auto-confirms any interactive overwrite prompts from cp.
    Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
            path=["/bin", "/usr/bin/"])
  else:
    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(ranger_home, 'ews', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    File(os.path.join(ranger_home, 'ews', 'lib',params.jdbc_jar_name), mode=0644)

  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
    properties = params.config['configurations']['admin-properties'],
    owner = params.unix_user,
  )

  # Point install.properties at the driver jar actually installed above.
  if params.db_flavor.lower() == 'sqla':
    ModifyPropertiesFile(format("{ranger_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{ranger_home}/ews/lib/sajdbc4.jar')},
      owner = params.unix_user,
    )
  else:
    ModifyPropertiesFile(format("{ranger_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
      owner = params.unix_user,
    )
def setup_usersync(upgrade_type=None):
  """
  Configure Ranger Usersync: validate the LDAP bind password, lay down
  pid/log/conf directories, write ranger-ugsync-site.xml (passwords masked),
  store secrets in the usersync JCEKS file, and generate the self-signed
  usersync keystore if it does not exist yet.

  :param upgrade_type: when set, default templates are re-copied from conf.dist.
  """
  import params

  usersync_home = params.usersync_home
  ranger_home = params.ranger_home
  ranger_ugsync_conf = params.ranger_ugsync_conf

  # Only validate the bind password when LDAP-based sync is actually in use.
  if not is_empty(params.ranger_usersync_ldap_ldapbindpassword) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
    password_validation(params.ranger_usersync_ldap_ldapbindpassword)

  Directory(params.ranger_pid_dir,
    mode=0755,
    owner = params.unix_user,
    group = params.user_group,
    cd_access = "a",
    create_parents=True
  )

  if params.stack_supports_pid:
    File(format('{ranger_ugsync_conf}/ranger-usersync-env-piddir.sh'),
      content = format("export USERSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_USERSYNC_USER={unix_user}"),
      owner = params.unix_user,
      group = params.unix_group,
      mode=0755
    )

  Directory(params.usersync_log_dir,
    owner = params.unix_user,
    group = params.unix_group,
    cd_access = 'a',
    create_parents=True,
    mode=0755,
    recursive_ownership = True
  )

  File(format('{ranger_ugsync_conf}/ranger-usersync-env-logdir.sh'),
    content = format("export logdir={usersync_log_dir}"),
    owner = params.unix_user,
    group = params.unix_group,
    mode=0755
  )

  Directory(format("{ranger_ugsync_conf}/"),
    owner = params.unix_user
  )

  # On upgrade, refresh the stock default-site template from conf.dist.
  if upgrade_type is not None:
    src_file = format('{usersync_home}/conf.dist/ranger-ugsync-default.xml')
    dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

  # Newer stacks template log4j.properties; older stacks still use log4j.xml,
  # which is only refreshed from conf.dist during upgrades.
  if params.stack_supports_ranger_log4j:
    File(format('{usersync_home}/conf/log4j.properties'),
      owner=params.unix_user,
      group=params.unix_group,
      content=InlineTemplate(params.usersync_log4j),
      mode=0644
    )
  elif upgrade_type is not None and not params.stack_supports_ranger_log4j:
    src_file = format('{usersync_home}/conf.dist/log4j.xml')
    dst_file = format('{usersync_home}/conf/log4j.xml')
    Execute(('cp', '-f', src_file, dst_file), sudo=True)

  # remove plain-text password from xml configs
  ranger_ugsync_site_copy = {}
  ranger_ugsync_site_copy.update(params.config['configurations']['ranger-ugsync-site'])
  for prop in params.ranger_usersync_password_properties:
    if prop in ranger_ugsync_site_copy:
      ranger_ugsync_site_copy[prop] = "_"

  XmlConfig("ranger-ugsync-site.xml",
    conf_dir=ranger_ugsync_conf,
    configurations=ranger_ugsync_site_copy,
    configuration_attributes=params.config['configuration_attributes']['ranger-ugsync-site'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  if os.path.isfile(params.ranger_ugsync_default_file):
    File(params.ranger_ugsync_default_file, owner=params.unix_user, group=params.unix_group)

  if os.path.isfile(params.usgsync_log4j_file):
    File(params.usgsync_log4j_file, owner=params.unix_user, group=params.unix_group)

  # setuid binary used for PAM credential validation (mode 4555).
  if os.path.isfile(params.cred_validator_file):
    File(params.cred_validator_file, group=params.unix_group, mode=04555)

  # Store usersync secrets in its JCEKS credential file.
  ranger_credential_helper(params.ugsync_cred_lib, 'usersync.ssl.key.password', params.ranger_usersync_keystore_password, params.ugsync_jceks_path)

  if not is_empty(params.ranger_usersync_ldap_ldapbindpassword) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
    ranger_credential_helper(params.ugsync_cred_lib, 'ranger.usersync.ldap.bindalias', params.ranger_usersync_ldap_ldapbindpassword, params.ugsync_jceks_path)

  ranger_credential_helper(params.ugsync_cred_lib, 'usersync.ssl.truststore.password', params.ranger_usersync_truststore_password, params.ugsync_jceks_path)

  File(params.ugsync_jceks_path,
       owner = params.unix_user,
       group = params.unix_group,
       mode = 0640
  )

  File([params.usersync_start, params.usersync_stop],
       owner = params.unix_user,
       group = params.unix_group
  )

  File(params.usersync_services_file,
    mode = 0755,
  )

  Execute(('ln','-sf', format('{usersync_services_file}'),'/usr/bin/ranger-usersync'),
    not_if=format("ls /usr/bin/ranger-usersync"),
    only_if=format("ls {usersync_services_file}"),
    sudo=True)

  # Generate a self-signed keystore on first setup only.
  if not os.path.isfile(params.ranger_usersync_keystore_file):
    cmd = format("{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'")

    Execute(cmd, logoutput=True, user = params.unix_user)

  File(params.ranger_usersync_keystore_file,
       owner = params.unix_user,
       group = params.unix_group,
       mode = 0640
  )

  create_core_site_xml(ranger_ugsync_conf)
def setup_tagsync(upgrade_type=None):
  """
  Configure Ranger Tagsync: conf/pid/log directories, ranger-tagsync-site.xml,
  optional tagsync SSL configs, the Atlas application properties it consumes,
  log4j, and the /usr/bin/ranger-tagsync symlink.

  :param upgrade_type: unused here; kept for call-site symmetry with the
                       other setup_* routines.
  """
  import params

  ranger_tagsync_home = params.ranger_tagsync_home
  ranger_home = params.ranger_home
  ranger_tagsync_conf = params.ranger_tagsync_conf

  Directory(format("{ranger_tagsync_conf}"),
    owner = params.unix_user,
    group = params.unix_group,
    create_parents = True
  )

  Directory(params.ranger_pid_dir,
    mode=0755,
    create_parents=True,
    owner = params.unix_user,
    group = params.user_group,
    cd_access = "a",
  )

  if params.stack_supports_pid:
    File(format('{ranger_tagsync_conf}/ranger-tagsync-env-piddir.sh'),
      content = format("export TAGSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_TAGSYNC_USER={unix_user}"),
      owner = params.unix_user,
      group = params.unix_group,
      mode=0755
    )

  Directory(params.tagsync_log_dir,
    create_parents = True,
    owner = params.unix_user,
    group = params.unix_group,
    cd_access = "a",
    mode=0755
  )

  File(format('{ranger_tagsync_conf}/ranger-tagsync-env-logdir.sh'),
    content = format("export RANGER_TAGSYNC_LOG_DIR={tagsync_log_dir}"),
    owner = params.unix_user,
    group = params.unix_group,
    mode=0755
  )

  XmlConfig("ranger-tagsync-site.xml",
    conf_dir=ranger_tagsync_conf,
    configurations=params.config['configurations']['ranger-tagsync-site'],
    configuration_attributes=params.config['configuration_attributes']['ranger-tagsync-site'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  if params.stack_supports_ranger_tagsync_ssl_xml_support:
    Logger.info("Stack supports tagsync-ssl configurations, performing the same.")
    setup_tagsync_ssl_configs()
  else:
    Logger.info("Stack doesnt support tagsync-ssl configurations, skipping the same.")

  # Atlas connection properties used by tagsync to consume tag events.
  PropertiesFile(format('{ranger_tagsync_conf}/atlas-application.properties'),
    properties = params.tagsync_application_properties,
    mode=0755,
    owner=params.unix_user,
    group=params.unix_group
  )

  File(format('{ranger_tagsync_conf}/log4j.properties'),
    owner=params.unix_user,
    group=params.unix_group,
    content=InlineTemplate(params.tagsync_log4j),
    mode=0644
  )

  File(params.tagsync_services_file,
    mode = 0755,
  )

  Execute(('ln','-sf', format('{tagsync_services_file}'),'/usr/bin/ranger-tagsync'),
    not_if=format("ls /usr/bin/ranger-tagsync"),
    only_if=format("ls {tagsync_services_file}"),
    sudo=True)

  create_core_site_xml(ranger_tagsync_conf)
def ranger_credential_helper(lib_path, alias_key, alias_value, file_path):
  """
  Store alias_key=alias_value in the JCEKS credential store at file_path
  using Ranger's buildks tool.

  NOTE: Ambari's format() substitutes {file_path} from this function's own
  locals — renaming locals referenced in format strings would break it.

  :param lib_path: classpath for the buildks credential API jar(s).
  :param alias_key: credential alias to create.
  :param alias_value: secret value (wrapped in PasswordString so it is masked
                      in logged command lines).
  :param file_path: filesystem path of the JCEKS store (jceks://file prefix
                    is added here).
  """
  import params
  java_bin = format('{java_home}/bin/java')
  file_path = format('jceks://file{file_path}')
  cmd = (java_bin, '-cp', lib_path, 'org.apache.ranger.credentialapi.buildks', 'create', alias_key, '-value', PasswordString(alias_value), '-provider', file_path)
  Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)
def create_core_site_xml(conf_dir):
  """
  Write core-site.xml into conf_dir on kerberized stacks; uses the cluster's
  real core-site when HDFS is installed, otherwise a minimal substitute
  (params.core_site_property).

  :param conf_dir: target configuration directory.
  """
  import params

  if params.stack_supports_ranger_kerberos:
    if params.has_namenode:
      XmlConfig("core-site.xml",
                conf_dir=conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configuration_attributes']['core-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644
      )
    else:
      Logger.warning('HDFS service not installed. Creating core-site.xml file.')
      XmlConfig("core-site.xml",
        conf_dir=conf_dir,
        configurations=params.core_site_property,
        configuration_attributes={},
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644
      )
def setup_ranger_audit_solr():
import params
if params.security_enabled and params.stack_supports_ranger_kerberos:
if params.solr_jaas_file is not None:
File(format("{solr_jaas_file}"),
content=Template("ranger_solr_jaas_conf.j2"),
owner=params.unix_user
)
try:
check_znode()
if params.stack_supports_ranger_solr_configs:
Logger.info('Solr configrations supported,creating solr-configurations.')
File(format("{ranger_solr_conf}/solrconfig.xml"),
content=InlineTemplate(params.ranger_solr_config_content),
owner=params.unix_user,
group=params.unix_group,
mode=0644
)
solr_cloud_util.upload_configuration_to_zk(
zookeeper_quorum = params.zookeeper_quorum,
solr_znode = params.solr_znode,
config_set = params.ranger_solr_config_set,
config_set_dir = params.ranger_solr_conf,
tmp_dir = params.tmp_dir,
java64_home = params.java_home,
solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
jaas_file=params.solr_jaas_file,
retry=30, interval=5
)
else:
Logger.info('Solr configrations not supported, skipping solr-configurations.')
solr_cloud_util.upload_configuration_to_zk(
zookeeper_quorum = params.zookeeper_quorum,
solr_znode = params.solr_znode,
config_set = params.ranger_solr_config_set,
config_set_dir = params.ranger_solr_conf,
tmp_dir = params.tmp_dir,
java64_home = params.java_home,
jaas_file=params.solr_jaas_file,
retry=30, interval=5)
if params.security_enabled and params.has_infra_solr \
and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
solr_cloud_util.add_solr_roles(params.config,
roles = [params.infra_solr_role_ranger_admin, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
new_service_principals = [params.ranger_admin_jaas_principal])
service_default_principals_map = [('hdfs', 'nn'), ('hbase', 'hbase'), ('hive', 'hive'), ('kafka', 'kafka'), ('kms', 'rangerkms'),
('knox', 'knox'), ('nifi', 'nifi'), ('storm', 'storm'), ('yanr', 'yarn')]
service_principals = get_ranger_plugin_principals(service_default_principals_map)
solr_cloud_util.add_solr_roles(params.config,
roles = [params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
new_service_principals = service_principals)
solr_cloud_util.create_collection(
zookeeper_quorum = params.zookeeper_quorum,
solr_znode = params.solr_znode,
collection = params.ranger_solr_collection_name,
config_set = params.ranger_solr_config_set,
java64_home = params.java_home,
shards = params.ranger_solr_shards,
replication_factor = int(params.replication_factor),
jaas_file = params.solr_jaas_file)
if params.security_enabled and params.has_infra_solr \
and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
secure_znode(format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file)
secure_znode(format('{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file)
except ExecutionFailed as execution_exception:
Logger.error('Error when configuring Solr for Ranger, Kindly check Solr/Zookeeper services to be up and running:\n {0}'.format(execution_exception))
def setup_ranger_admin_passwd_change():
  """
  Change the Ranger admin user's password via db_setup.py -changepassword,
  but only when it differs from the shipped default. `!p` masks the
  passwords in logged command lines.
  """
  import params

  if params.admin_password != params.default_admin_password:
    cmd = format('ambari-python-wrap {ranger_home}/db_setup.py -changepassword {admin_username} {default_admin_password!p} {admin_password!p}')
    Logger.info('Updating admin password')
    Execute(cmd, environment={'JAVA_HOME': params.java_home, 'RANGER_ADMIN_HOME': params.ranger_home}, user=params.unix_user)
# Retried because ZooKeeper/Solr may still be starting when this runs.
@retry(times=10, sleep_time=5, err_class=Fail)
def check_znode():
  """Verify (and wait for) the Solr znode to exist in ZooKeeper."""
  import params
  solr_cloud_util.check_znode(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.solr_znode,
    java64_home=params.java_home)
def secure_znode(znode, jaasFile):
  """
  Restrict ACLs on a znode to the Ranger admin SASL principal.

  :param znode: full znode path to secure.
  :param jaasFile: JAAS configuration file used to authenticate to ZooKeeper.
  """
  import params
  solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
                               solr_znode=znode,
                               jaas_file=jaasFile,
                               java64_home=params.java_home, sasl_users=[params.ranger_admin_jaas_principal])
def get_ranger_plugin_principals(services_defaults_tuple_list):
  """
  Get ranger plugin user principals from service-default value maps using
  ranger-*-audit configurations.

  NOTE: Ambari's format() substitutes {service} from the loop variable in
  this frame — do not rename it.

  :param services_defaults_tuple_list: list of (service_name, default_principal)
                                       pairs; must be non-empty.
  :return: list of principals, one per input pair (configured value if
           present, otherwise the supplied default).
  :raises Exception: if the input list is empty.
  """
  import params
  user_principals = []
  if len(services_defaults_tuple_list) < 1:
    raise Exception("Services - defaults map parameter is missing.")

  for (service, default_value) in services_defaults_tuple_list:
    user_principal = default(format("configurations/ranger-{service}-audit/xasecure.audit.jaas.Client.option.principal"), default_value)
    user_principals.append(user_principal)
  return user_principals
def setup_tagsync_ssl_configs():
  """
  Write the tagsync SSL configuration files (ranger-policymgr-ssl.xml and
  atlas-tagsync-ssl.xml) with passwords masked, and store the real keystore/
  truststore passwords in the tagsync JCEKS credential files (0640).
  """
  import params
  Directory(params.security_store_path,
    cd_access="a",
    create_parents=True)

  Directory(params.tagsync_etc_path,
    cd_access="a",
    owner=params.unix_user,
    group=params.unix_group,
    mode=0775,
    create_parents=True)

  # remove plain-text password from xml configs
  ranger_tagsync_policymgr_ssl_copy = {}
  ranger_tagsync_policymgr_ssl_copy.update(params.config['configurations']['ranger-tagsync-policymgr-ssl'])
  for prop in params.ranger_tagsync_password_properties:
    if prop in ranger_tagsync_policymgr_ssl_copy:
      ranger_tagsync_policymgr_ssl_copy[prop] = "_"

  XmlConfig("ranger-policymgr-ssl.xml",
    conf_dir=params.ranger_tagsync_conf,
    configurations=ranger_tagsync_policymgr_ssl_copy,
    configuration_attributes=params.config['configuration_attributes']['ranger-tagsync-policymgr-ssl'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  # Real passwords go into the JCEKS store instead of the xml above.
  ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore', params.ranger_tagsync_keystore_password, params.ranger_tagsync_credential_file)

  ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore', params.ranger_tagsync_truststore_password, params.ranger_tagsync_credential_file)

  File(params.ranger_tagsync_credential_file,
    owner = params.unix_user,
    group = params.unix_group,
    mode = 0640
  )

  # remove plain-text password from xml configs
  atlas_tagsync_ssl_copy = {}
  atlas_tagsync_ssl_copy.update(params.config['configurations']['atlas-tagsync-ssl'])
  for prop in params.ranger_tagsync_password_properties:
    if prop in atlas_tagsync_ssl_copy:
      atlas_tagsync_ssl_copy[prop] = "_"

  XmlConfig("atlas-tagsync-ssl.xml",
    conf_dir=params.ranger_tagsync_conf,
    configurations=atlas_tagsync_ssl_copy,
    configuration_attributes=params.config['configuration_attributes']['atlas-tagsync-ssl'],
    owner=params.unix_user,
    group=params.unix_group,
    mode=0644)

  # Same treatment for the Atlas-side tagsync SSL credentials.
  ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore', params.atlas_tagsync_keystore_password, params.atlas_tagsync_credential_file)

  ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore', params.atlas_tagsync_truststore_password, params.atlas_tagsync_credential_file)

  File(params.atlas_tagsync_credential_file,
    owner = params.unix_user,
    group = params.unix_group,
    mode = 0640
  )
  Logger.info("Configuring tagsync-ssl configurations done successfully.")
def update_password_configs():
  """
  Scrub plain-text passwords from install.properties by overwriting them
  with the placeholder "_" (the real values live in the JCEKS store).

  NOTE: {ranger_home} here is resolved by Ambari's format() from the caller
  frame / configuration, not from a local variable.
  """
  import params

  password_configs = {'db_root_password': '_', 'db_password': '_'}
  if params.stack_supports_ranger_audit_db:
    password_configs['audit_db_password'] = '_'

  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
    properties = password_configs,
    owner = params.unix_user,
  )
|
|
import datetime
import decimal
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from djstripe.models import Event, Transfer, Customer, CurrentSubscription, Charge
from tests.test_transfer import TRANSFER_CREATED_TEST_DATA, TRANSFER_CREATED_TEST_DATA2
class CustomerManagerTest(TestCase):
    """
    Tests for the Customer manager's date-range and plan-summary helpers.

    Fixture: 10 active customers on plan "test", 1 canceled on "test",
    1 active on "test-2" — all started in January 2013.
    """

    def _create_customer(self, suffix):
        """Create a Customer backed by a fresh user, with fake card data."""
        subscriber = get_user_model().objects.create_user(
            username="patrick{0}".format(suffix),
            email="patrick{0}@gmail.com".format(suffix)
        )
        return Customer.objects.create(
            subscriber=subscriber,
            stripe_id="cus_xxxxxxxxxxxxxx{0}".format(suffix),
            card_fingerprint="YYYYYYYY",
            card_last_4="2342",
            card_kind="Visa"
        )

    def setUp(self):
        # create customers and current subscription records
        period_start = datetime.datetime(2013, 4, 1, tzinfo=timezone.utc)
        period_end = datetime.datetime(2013, 4, 30, tzinfo=timezone.utc)
        # NOTE(review): naive datetime while the period bounds are aware —
        # kept from the original ("more realistic start"); confirm USE_TZ.
        start = datetime.datetime(2013, 1, 1, 0, 0, 1)
        for i in range(10):
            CurrentSubscription.objects.create(
                customer=self._create_customer(i),
                plan="test",
                current_period_start=period_start,
                current_period_end=period_end,
                amount=(500 / decimal.Decimal("100.0")),
                status="active",
                start=start,
                quantity=1
            )
        # One canceled subscription on plan "test".
        CurrentSubscription.objects.create(
            customer=self._create_customer(11),
            plan="test",
            current_period_start=period_start,
            current_period_end=period_end,
            amount=(500 / decimal.Decimal("100.0")),
            status="canceled",
            canceled_at=period_end,
            start=start,
            quantity=1
        )
        # One active subscription on plan "test-2".
        CurrentSubscription.objects.create(
            customer=self._create_customer(12),
            plan="test-2",
            current_period_start=period_start,
            current_period_end=period_end,
            amount=(500 / decimal.Decimal("100.0")),
            status="active",
            start=start,
            quantity=1
        )

    def test_started_during_no_records(self):
        self.assertEqual(
            Customer.objects.started_during(2013, 4).count(),
            0
        )

    def test_started_during_has_records(self):
        self.assertEqual(
            Customer.objects.started_during(2013, 1).count(),
            12
        )

    def test_canceled_during(self):
        self.assertEqual(
            Customer.objects.canceled_during(2013, 4).count(),
            1
        )

    def test_canceled_all(self):
        self.assertEqual(
            Customer.objects.canceled().count(),
            1
        )

    def test_active_all(self):
        self.assertEqual(
            Customer.objects.active().count(),
            11
        )

    def test_started_plan_summary(self):
        for plan in Customer.objects.started_plan_summary_for(2013, 1):
            if plan["current_subscription__plan"] == "test":
                self.assertEqual(plan["count"], 11)
            if plan["current_subscription__plan"] == "test-2":
                self.assertEqual(plan["count"], 1)

    def test_active_plan_summary(self):
        for plan in Customer.objects.active_plan_summary():
            if plan["current_subscription__plan"] == "test":
                self.assertEqual(plan["count"], 10)
            if plan["current_subscription__plan"] == "test-2":
                self.assertEqual(plan["count"], 1)

    def test_canceled_plan_summary(self):
        for plan in Customer.objects.canceled_plan_summary_for(2013, 1):
            if plan["current_subscription__plan"] == "test":
                self.assertEqual(plan["count"], 1)
            if plan["current_subscription__plan"] == "test-2":
                self.assertEqual(plan["count"], 0)

    def test_churn(self):
        # 1 canceled out of 11 still-subscribed "test"-plan customers.
        self.assertEqual(
            Customer.objects.churn(),
            decimal.Decimal("1") / decimal.Decimal("11")
        )
class TransferManagerTest(TestCase):
    """Tests for the Transfer manager's date-range and aggregate helpers."""

    def _process_created_event(self, message):
        """Create and process a livemode transfer.created webhook Event."""
        event = Event.objects.create(
            stripe_id=message["id"],
            kind="transfer.created",
            livemode=True,
            webhook_message=message,
            validated_message=message,
            valid=True
        )
        event.process()

    def test_transfer_summary(self):
        # Two transfer.created events in September 2012 (from fixture data).
        self._process_created_event(TRANSFER_CREATED_TEST_DATA)
        self._process_created_event(TRANSFER_CREATED_TEST_DATA2)

        self.assertEqual(Transfer.objects.during(2012, 9).count(), 2)

        totals = Transfer.objects.paid_totals_for(2012, 9)
        self.assertEqual(
            totals["total_amount"], decimal.Decimal("19.10")
        )
        self.assertEqual(
            totals["total_net"], decimal.Decimal("19.10")
        )
        self.assertEqual(
            totals["total_charge_fees"], decimal.Decimal("0.90")
        )
        self.assertEqual(
            totals["total_adjustment_fees"], decimal.Decimal("0")
        )
        self.assertEqual(
            totals["total_refund_fees"], decimal.Decimal("0")
        )
        self.assertEqual(
            totals["total_validation_fees"], decimal.Decimal("0")
        )
class ChargeManagerTest(TestCase):
    """Tests for the Charge manager's date-window and paid-totals helpers."""

    def setUp(self):
        customer = Customer.objects.create(stripe_id="cus_XXXXXXX")

        def charge(stripe_id, created, **extra):
            # One-line fixture builder; every charge belongs to the same customer.
            return Charge.objects.create(
                stripe_id=stripe_id,
                customer=customer,
                charge_created=created,
                **extra
            )

        self.march_charge = charge("ch_XXXXMAR1", datetime.datetime(2015, 3, 31))
        self.april_charge_1 = charge(
            "ch_XXXXAPR1",
            datetime.datetime(2015, 4, 1),
            paid=True,
            amount=decimal.Decimal("20.15"),
            fee=decimal.Decimal("4.90"),
        )
        self.april_charge_2 = charge(
            "ch_XXXXAPR2",
            datetime.datetime(2015, 4, 18),
            paid=True,
            amount=decimal.Decimal("10.35"),
            amount_refunded=decimal.Decimal("5.35"),
        )
        self.april_charge_3 = charge(
            "ch_XXXXAPR3",
            datetime.datetime(2015, 4, 30),
            paid=False,
            amount=decimal.Decimal("100.00"),
            amount_refunded=decimal.Decimal("80.00"),
            fee=decimal.Decimal("5.00"),
        )
        self.may_charge = charge("ch_XXXXMAY1", datetime.datetime(2015, 5, 1))
        self.november_charge = charge("ch_XXXXNOV1", datetime.datetime(2015, 11, 16))
        self.charge_2014 = charge("ch_XXXX20141", datetime.datetime(2014, 12, 31))
        self.charge_2016 = charge("ch_XXXX20161", datetime.datetime(2016, 1, 1))

    def test_is_during_april_2015(self):
        """during() keeps exactly the charges created in April 2015."""
        charge_ids = [
            c.stripe_id for c in Charge.objects.during(year=2015, month=4)
        ]
        self.assertIn(self.april_charge_1.stripe_id, charge_ids, "April charge 1 not in charges.")
        self.assertIn(self.april_charge_2.stripe_id, charge_ids, "April charge 2 not in charges.")
        self.assertIn(self.april_charge_3.stripe_id, charge_ids, "April charge 3 not in charges.")
        self.assertNotIn(self.march_charge.stripe_id, charge_ids, "March charge unexpectedly in charges.")
        self.assertNotIn(self.may_charge.stripe_id, charge_ids, "May charge unexpectedly in charges.")
        self.assertNotIn(self.november_charge.stripe_id, charge_ids, "November charge unexpectedly in charges.")
        self.assertNotIn(self.charge_2014.stripe_id, charge_ids, "2014 charge unexpectedly in charges.")
        self.assertNotIn(self.charge_2016.stripe_id, charge_ids, "2016 charge unexpectedly in charges.")

    def test_get_paid_totals_for_april_2015(self):
        """paid_totals_for() aggregates only the paid April charges."""
        totals = Charge.objects.paid_totals_for(year=2015, month=4)
        self.assertEqual(decimal.Decimal("30.50"), totals["total_amount"], "Total amount is not correct.")
        self.assertEqual(decimal.Decimal("4.90"), totals["total_fee"], "Total fees is not correct.")
        self.assertEqual(decimal.Decimal("5.35"), totals["total_refunded"], "Total amount refunded is not correct.")
|
|
"""Collate the README.md files of all repos generated from a template, into a single Markdown file
that contains a section for each repo.
Each individual README is prepended with a header that includes the GitHub login, as inferred from the name of the
generated repo. If the README already begins with a header, the login is appended, or substituted if the header is
simply "About Me".
If a file Roster.csv with columns "GitHub Login", "Preferred Name", and "Last Name" is present in the current directory,
these names are used instead of the GitHub login.
Usage:
python collate_readmes.py
python collate_readmes.py | pandoc --from markdown --metadata pagetitle="About Me" -s -o about.html
"""
from datetime import datetime
from dateutil import tz
import re
import sys
import json
import os
import subprocess
from pathlib import Path
from string import Template
import numpy as np
import pandas as pd
from graphqlclient import GraphQLClient
def get_git_config(name):
    """Return the value of the git config entry *name*, raising on failure."""
    proc = subprocess.run(
        ["git", "config", name], capture_output=True, text=True
    )
    if proc.returncode:
        # git writes the reason for the failed lookup to stderr.
        raise Exception(proc.stderr.strip())
    return proc.stdout.rstrip()
# GitHub token: prefer the environment variable, fall back to the git config
# entry `user.accesstoken` (get_git_config raises if neither is available).
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN") or get_git_config(
    "user.accesstoken"
)
# Shared GraphQL client for the GitHub v4 API, authenticated once at import.
GH_CLIENT = GraphQLClient("https://api.github.com/graphql")
GH_CLIENT.inject_token(f"token {GITHUB_ACCESS_TOKEN}")
def query(gql, variables=None):
    """Perform a GraphQL query, with error detection and variable substitution."""
    variables = {} if variables is None else variables
    # Variables are spliced in as JSON via string.Template, not GraphQL vars.
    substituted = Template(gql).substitute(
        {k: json.dumps(v) for k, v in variables.items()}
    )
    result = json.loads(GH_CLIENT.execute(substituted, variables))
    errors = result.get("errors")
    if errors:
        # TODO include err['locations'] = {'line', 'column'}
        raise Exception("\n".join(err["message"] for err in errors))
    return result["data"]
ORG_REPOS_GQL = """
query {
organization(login: $organization_login) {
repositories(first: 100, after: $cursor) {
nodes {
name
nameWithOwner
readme: object(expression: "master:README.md") {
... on Blob {
text
}
}
templateRepository {
nameWithOwner
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 100) {
edges {
node {
oid
authoredDate
committedDate
pushedDate
author {
name
email
date
}
}
}
}
}
}
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
"""
def get_generated_repos(name_with_owner):
    """Return (template_repo, generated_repos) for the org owning *name_with_owner*.

    Pages through every repository in the organization, then splits out the
    template repo itself and all repos generated from it.
    """
    org_login, _ = name_with_owner.split("/", 1)
    repos = []
    cursor = None
    while True:
        page = query(
            ORG_REPOS_GQL, {"organization_login": org_login, "cursor": cursor}
        )["organization"]["repositories"]
        repos.extend(page["nodes"])
        if not page["pageInfo"]["hasNextPage"]:
            break
        cursor = page["pageInfo"]["endCursor"]
    master = next(r for r in repos if r["nameWithOwner"] == name_with_owner)
    forks = [
        r
        for r in repos
        if r["templateRepository"]
        and r["templateRepository"]["nameWithOwner"] == name_with_owner
    ]
    return master, forks
def longest_prefix(names):
    """Return the longest common prefix of *names* ("" if there is none).

    The original raised StopIteration when the names shared no prefix at
    all; this version returns the empty string instead. An empty *names*
    still raises ValueError (from min), as before.
    """
    shortest = min(len(s) for s in names)
    return next(
        (
            names[0][:n]
            for n in range(shortest, 0, -1)
            if len({s[:n] for s in names}) == 1
        ),
        "",
    )
def annotate_repos(repos, roster):
    """Annotate each repo dict with 'login', 'author', and 'commits'.

    The login is the repo name minus the longest common prefix of all repo
    names; the author is looked up from *roster*, falling back to the login.
    """
    prefix_len = len(longest_prefix([repo["name"] for repo in repos]))
    for repo in repos:
        login = repo["name"][prefix_len:]
        repo["login"] = login
        repo["author"] = roster.get(login, login)
        # Keep only commits that Christian (the instructor) didn't author.
        repo["commits"] = [
            edge["node"]
            for edge in repo["ref"]["target"]["history"]["edges"]
            if edge["node"]["author"]["email"] != "christian@nyu.edu"
        ]
def read_roster():
    """Return a mapping of GitHub login -> "First Last" from Roster.csv.

    Returns {} when Roster.csv is absent, or when it lacks a recognized
    first-name column ("Preferred", "English", or "First"), a "Last" column,
    or a "GitHub Login" column (the original crashed with TypeError/KeyError
    in those cases). Rows with missing names (NaN) are skipped.
    """
    roster_path = Path("Roster.csv")
    if not roster_path.exists():
        return {}
    roster = pd.read_csv(roster_path)
    column_first_names = ["Preferred", "English", "First"]
    first_names = next(
        (roster[name] for name in column_first_names if name in roster), None
    )
    # Guard against missing columns instead of raising.
    if first_names is None or "Last" not in roster or "GitHub Login" not in roster:
        return {}
    names = first_names + " " + roster["Last"]
    return {
        login: name
        for login, name in zip(roster["GitHub Login"], names)
        if isinstance(name, str)  # drops NaN entries from incomplete rows
    }
def is_late_commit(commit, deadline="2019-09-09T03:00:00+08:00"):
    """Return True if the commit was authored after *deadline*.

    *deadline* is an ISO-8601 string; the previously hard-coded value is now
    the default so other deadlines can be checked. Comparison is
    lexicographic, which is chronologically correct only when both
    timestamps share the same UTC offset — TODO confirm all commit dates
    use +08:00 like the default deadline.
    """
    return commit["author"]["date"] > deadline
def print_late_commits(repos):
    """Report repos that were turned in late or not at all.

    Each repo is reported under at most one label (the first that matches,
    in the order below), then every late commit's local timestamp is listed.
    """
    # Note: all() over no commits is True, so commit-less repos would also
    # match "Late" — the dedupe below reports them only under "No commits".
    categories = {
        "No commits": lambda r: not r["commits"],
        "Late": lambda r: all(map(is_late_commit, r["commits"])),
        "Some late commits": lambda r: any(map(is_late_commit, r["commits"])),
    }
    already_reported = []
    for label, pred in categories.items():
        matches = [r for r in repos if pred(r) and r not in already_reported]
        already_reported += matches
        if matches:
            print(f"{label}: {', '.join(sorted(r['login'] for r in matches))}")
    for repo in repos:
        late = [c for c in repo["commits"] if is_late_commit(c)]
        if not late:
            continue
        print(f"  {repo['login']}:")
        for stamp in {c["author"]["date"] for c in late}:
            local = (
                datetime.fromisoformat(stamp)
                .astimezone(tz.gettz("China"))
                .strftime("%H:%M %a, %b %-d")
            )
            print(f"    {local}")
def increment_headings(markdown):
    """Demote every heading one level if the text contains a level-one heading.

    Also normalizes heading lines "#   title" -> "# title".
    Note: this doesn't know not to look in fenced code blocks.

    Fixes over the original: the normalization used \\s* (which could swallow
    the newline after a bare "#" line), and the increment inserted "# " after
    the hashes, producing a doubled space ("##  title").
    """
    # Normalize the '## ' spacing ([ \t] so newlines are never consumed).
    markdown = re.sub(r"^(#+)[ \t]*", r"\1 ", markdown, 0, re.M)
    # If there's an H1, add one '#' to the front of every heading.
    if re.compile(r"^# ", re.M).search(markdown):
        markdown = re.sub(r"^#", "##", markdown, 0, re.M)
    return markdown
def print_collated_readme(repos):
    """Print every repo's README as one Markdown document, one section per author.

    Each README gets a level-one title containing the author's name; the rest
    of the README is demoted one heading level by increment_headings.
    """
    for repo in repos:
        name = repo["author"]
        about = repo["readme"]["text"].strip()
        title = None
        if about.startswith("# "):
            # README already opens with a heading: split it off as the title.
            title, about = about.split("\n", 1)
        if not title or title == "# About Me":
            title = "# " + name
        if name not in title:
            title += f" ({name})"
        print(increment_headings(title + "\n" + about))
        print("\n---\n")
def main(show_late=False, collate=True):
    """Collate the READMEs of all repos generated from the welcome template.

    Args:
        show_late: also print repos with late or missing commits
            (replaces the original dead ``if False:`` toggle).
        collate: print the collated README document
            (replaces the original ``if True:`` toggle).
    """
    _, repos = get_generated_repos("application-lab/1-WELCOME-TO-APPLAB")
    annotate_repos(repos, read_roster())
    # Drop repos with no student commits, and order sections by author name.
    repos = [r for r in repos if r["commits"]]
    repos.sort(key=lambda r: r["author"])
    if show_late:
        print_late_commits(repos)
    if collate:
        print_collated_readme(repos)
if __name__ == "__main__":
main()
|
|
#! /usr/bin/env python
"""
Parse throughput data for RBAR from trace files.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-21 15:51:37 -0500 (Fri, 21 Oct 2011) $
* $LastChangedRevision: 5246 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from wins import *
from wins.ieee80211 import *
from optparse import OptionParser
import sys
from copy import copy
import numpy as np
def read_trace(options, tracefile):
    """Load and return a wins Trace from *tracefile*.

    The *options* argument is unused here but kept for interface parity with
    the other parse helpers.
    """
    trace = Trace()
    trace.read(tracefile)
    return trace
def parse_tput_info(trace, options, **kwargs):
    """Parse per-packet throughput info from a trace.

    :param trace: trace object whose `events` are dicts of string fields.
    :param options: parsed options; only `tmin` and `tmax` are used here.
    :param kwargs: may contain 'packetlength' to seed the packet length.
    :return: (pinfo, snrdata, param) where pinfo maps packet names to
        timing/SNR dicts, snrdata maps each expected average SNR to observed
        (ts, snr) series, and param is {'packetlength': <int>}.

    Fixes over the original: the exec()-built assignments/asserts are
    replaced with direct code; bare `except:` clauses name the exceptions
    they swallow; `pid`/`plen` are checked against None explicitly so the
    comparisons also work on Python 3 (where `None >= 0` raises TypeError);
    the unused `ninfo` and `stoptime` locals are removed.
    """
    # initialize parameters (kwargs.get replaces the original exec() hack)
    packetlength = kwargs.get('packetlength', None)
    pinfo, pktname, avgsnr = None, None, None
    snrdata = {}
    agtsnd = []     # FIFO of sent-but-not-yet-received packet names
    # parse trace
    for e in trace.events:
        # get event parameters (unused fields are still read so that a
        # malformed event raises KeyError exactly as before)
        ts = float(e['ts'])
        n, obj, evt = e['nid'], e['obj'], e['event']
        uid, pkt, pid = e['uid'], e['packet'], e['pid']
        try:
            plen = int(e['plen'])
        except (KeyError, TypeError, ValueError):
            plen = None
        try:
            pid = int(e['pid'])
        except (KeyError, TypeError, ValueError):
            pid = None
        try:
            sinr = float(e['sinr'].replace("dB", ""))
        except (KeyError, AttributeError, TypeError, ValueError):
            sinr = None
        # check timestamp window
        if ts < options.tmin:
            continue    # skip if ts < Tmin
        elif ts > options.tmax:
            break       # stop if ts > Tmax
        # motion-control event announcing a new expected average SNR
        if (obj == "MC") and (evt == "AVGSNR"):
            avgsnr = e['snr']
            if pinfo is None:
                pinfo = {}
            snrdata[avgsnr] = {'ts': [], 'snr': []}
        if pinfo is None:
            continue    # nothing to record until the first AVGSNR event
        # check for new packet being sent
        if (obj == "AGT") and (evt == "SND") \
                and (pid is not None) and (pid >= 0) \
                and (plen is not None) and (plen > 0):
            assert (avgsnr is not None)
            pktname = "%s.%s" % (obj, pid)
            assert (pktname not in pinfo)
            agtsnd.append(pktname)  # buffer pktname
            pinfo[pktname] = {
                'tstart': float(ts),
                'tstop': None,
                'latency': None,
                'plen': plen,
                'avgsnr': float(avgsnr.replace("dB", "")),
                'snr': None,
            }
            packetlength = plen
        # check for packet being delivered to receiver
        if (obj == "AGT") and (evt == "RCV") \
                and (pid is not None) and (pid >= 0) \
                and (plen is not None) and (plen > 0):
            pktname = "%s.%s" % (obj, pid)
            repeat = (pktname in pinfo) and (pinfo[pktname]['latency'] is not None)
            assert (agtsnd or repeat)
            assert (pktname in pinfo)
            assert (sinr is not None)
            if not repeat:
                # pop queued names until this packet (drops lost packets)
                pname = agtsnd.pop(0)
                while pname != pktname:
                    pname = agtsnd.pop(0)
            # NOTE(review): updates assumed to apply on repeats too (they
            # sit at the branch level, not under `if not repeat`) — confirm.
            pinfo[pktname]['tstop'] = float(ts)
            pinfo[pktname]['latency'] = float(ts) - pinfo[pktname]['tstart']
            pinfo[pktname]['snr'] = float(sinr)
        # update snr data
        if 'phy-sinr' in e:
            snrdata[avgsnr]['ts'].append(float(ts))
            snrdata[avgsnr]['snr'].append(float(e['phy-sinr'].replace("dB", "")))
        elif 'dot11n-sinr' in e:
            snrdata[avgsnr]['ts'].append(float(ts))
            snrdata[avgsnr]['snr'].append(float(e['dot11n-sinr'].replace("dB", "")))
    # final sanity check (direct assert replaces the original exec() hack)
    assert (packetlength is not None), 'packetlength is None'
    param = {'packetlength': packetlength}
    return pinfo, snrdata, param
def calc_avgsnr(snrdata):
    """Compute the time-averaged SNR actually observed for each expected SNR.

    For each expected-SNR key, trapezoidally integrates the observed SNR
    series over time and normalizes by the total span. With a single sample
    that sample is used; with no samples the key itself is returned as the
    value.
    """
    averaged = {}
    for expected, series in snrdata.items():
        times = np.array(series['ts'])
        values = np.array(series['snr'])
        averaged[expected] = expected   # fallback when there are no samples
        assert (len(times) == len(values))
        if len(times) > 1:
            # trapezoidal integration of SNR over time, divided by the span
            dt = times[1:] - times[:-1]
            span = 1.0 * (times[-1] - times[0])
            mean = (0.5 * (values[:-1] + values[1:]) * dt).sum() / span
            averaged[expected] = "%.4f dB" % (mean)
        elif len(times) > 0:
            averaged[expected] = "%.4f dB" % (series['snr'][0])
    return averaged
def calc_tput(pinfo, options):
    """Convert per-packet info into (SNR, throughput) data points.

    The x-value is the measured SNR (or the expected average SNR when
    options.use_average is set or no measurement exists); the y-value is the
    throughput in Mbps, zero for undelivered packets.
    """
    data = []
    for info in pinfo.values():
        # x-value: SNR
        snr = info['snr']
        if options.use_average or (snr is None):
            snr = info['avgsnr']
        # y-value: throughput in Mbps (zero when the packet never arrived)
        latency = info['latency']
        tput = 0
        if latency is not None:
            tput = 1e-6 * (8.0 * info['plen'] / latency)
        data.append({'x': snr, 'y': tput, 'ndata': 1})
    return data
def parse_tput():
    # Command-line driver: parse throughput-vs-SNR data from one or more
    # trace files, writing one parsed-data dict per trace to stdout and
    # diagnostics (progress, expected/actual SNR) to stderr.
    # NOTE: uses the Python 2 print statement; this module is Python 2 only.
    usage = "%prog [OPTIONS] TRACEFILE1 [TRACEFILE2 ...]\n" + \
            " Writes parsed data to standard output."
    parser = OptionParser(usage=usage)
    parser.add_option("", "--tmin", dest="tmin", \
            type="float", default=-np.inf, \
            help="Minimum simulation timestamp to parse [default=%default]")
    parser.add_option("", "--tmax", dest="tmax", \
            type="float", default=np.inf, \
            help="Maximum simulation timestamp to parse [default=%default]")
    parser.add_option("", "--use-average", dest="use_average", \
            action="store_true", default=False, \
            help="Bin data to average or expected SNR [default=%default]")
    (options, args) = parser.parse_args()

    if len(args)<1:
        print "Insufficient number of arguments."
        parser.print_help()
        raise SystemExit

    # every positional argument is a trace file
    tracefile = args[0:]
    numtraces = len(tracefile)

    # set parameters (per-trace fields are filled in below)
    default_parameters = {'xlabel': "SNR", \
                          'ylabel': "Throughput (Mbps)", \
                          'title': "Throughput vs. SNR", \
                          'label': None, \
                          'source': None, \
                          'format': None}
    formats = ['bo', 'ro', 'go']    # plot format cycles per trace index

    for k in range(numtraces):
        tfile = tracefile[k]
        # treat as normal wins trace file
        trace = read_trace(options, tfile)
        if not trace: continue
        sys.stderr.write("Parsing trace from %s ...\n"%(tfile))
        # parse Tput info from trace
        pinfo, snrdata, param = parse_tput_info(trace, options)
        # calculate throughput data -> output new data and parameters
        data = calc_tput(pinfo, options)
        if data:
            plen = param['packetlength']
            # NOTE(review): update() overwrites keys already in param with the
            # defaults — presumably intended since the key sets don't overlap.
            param.update(default_parameters.copy())
            param['format'] = formats[k%len(formats)]
            param['source'] = tfile
            param['label'] = " ${\\rm T}_{put}$, $L = %d$ bytes"%(plen)
            assert (param['label'] is not None)
            parsed_data = {'parameters': param, 'data': data}
            sys.stdout.write("%s\n"%(parsed_data) )
        # calculate average SNR -> write to stderr
        actual = calc_avgsnr(snrdata)
        sys.stderr.write("Expected/Actual SNR:\n %s\n"%(actual))
|
|
"""
Contains GUI forms for the slice filter.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from PySide2 import QtCore, QtWidgets
from . import base
from ...rendering import slicePlane
from ...filtering.filters import sliceFilter
################################################################################
class SliceSettingsDialog(base.GenericSettingsDialog):
    """
    Slice filter settings form.

    Lets the user position the slicing plane (centre and normal), optionally
    render the plane as a visual aid, and invert the atom selection.

    Fixes over the original: the row labels read "Place centre"/"Place
    normal" (typo for "Plane ..."), and the six near-identical spin-box
    constructions are folded into the _createSpinBox helper.
    """
    def __init__(self, mainWindow, title, parent=None):
        super(SliceSettingsDialog, self).__init__(title, parent, "Slice")

        # slice plane actor (visual aid only; shown on demand)
        self.slicePlane = slicePlane.SlicePlane(self.pipelinePage)

        # settings
        self._settings = sliceFilter.SliceFilterSettings()

        # default plane centre: the middle of the input lattice cell
        lattice = self.pipelinePage.inputState
        self._settings.updateSetting("x0", lattice.cellDims[0] / 2.0)
        self._settings.updateSetting("y0", lattice.cellDims[1] / 2.0)
        self._settings.updateSetting("z0", lattice.cellDims[2] / 2.0)
        self.showSlicePlaneChecked = False

        # show slice plane
        self.showSlicePlaneCheck = QtWidgets.QCheckBox()
        self.showSlicePlaneCheck.stateChanged.connect(self.showPlaneChanged)
        self.showSlicePlaneCheck.setToolTip("Show the slice plane as a visual aid")
        self.contentLayout.addRow("Show slice plane", self.showSlicePlaneCheck)

        self.addHorizontalDivider()

        # plane centre
        x0SpinBox = self._createSpinBox("x0", 1, "Plane centre x value", self.x0Changed)
        y0SpinBox = self._createSpinBox("y0", 1, "Plane centre y value", self.y0Changed)
        z0SpinBox = self._createSpinBox("z0", 1, "Plane centre z value", self.z0Changed)
        self.contentLayout.addRow("Plane centre", self._packRow([x0SpinBox, y0SpinBox, z0SpinBox]))

        # plane normal
        xnSpinBox = self._createSpinBox("xn", 0.1, "Plane normal x value", self.xnChanged)
        ynSpinBox = self._createSpinBox("yn", 0.1, "Plane normal y value", self.ynChanged)
        znSpinBox = self._createSpinBox("zn", 0.1, "Plane normal z value", self.znChanged)
        self.contentLayout.addRow("Plane normal", self._packRow([xnSpinBox, ynSpinBox, znSpinBox]))

        self.addHorizontalDivider()

        # invert
        self.invertCheck = QtWidgets.QCheckBox()
        self.invertCheck.stateChanged.connect(self.changeInvert)
        self.invertCheck.setToolTip("Invert the selection of atoms")
        self.contentLayout.addRow("Invert selection", self.invertCheck)

    def _createSpinBox(self, settingKey, step, tip, slot):
        """
        Build a QDoubleSpinBox in [-1000, 1000], initialized from
        *settingKey*, with *slot* connected to valueChanged.
        """
        spin = QtWidgets.QDoubleSpinBox()
        spin.setSingleStep(step)
        spin.setMinimum(-1000)
        spin.setMaximum(1000)
        # value is set before the signal is connected, so no spurious update
        spin.setValue(self._settings.getSetting(settingKey))
        spin.setToolTip(tip)
        spin.valueChanged.connect(slot)
        return spin

    def _packRow(self, widgets):
        """
        Lay out *widgets* horizontally, separated by comma labels.
        """
        hbox = QtWidgets.QHBoxLayout()
        for i, widget in enumerate(widgets):
            if i:
                hbox.addWidget(QtWidgets.QLabel(","))
            hbox.addWidget(widget)
        return hbox

    def refresh(self):
        """
        Called whenever new input is loaded.
        """
        # need to change min/max of sliders for x0,y0,z0
        pass

    def showPlaneChanged(self, state):
        """
        Show slice plane.
        """
        if self.showSlicePlaneCheck.isChecked():
            self.showSlicePlaneChecked = True
            self.showSlicePlane()
        else:
            self.showSlicePlaneChecked = False
            self.hideSlicePlane()

    def changeInvert(self, state):
        """
        Change invert.
        """
        checked = False if state == QtCore.Qt.Unchecked else True
        self._settings.updateSetting("invert", checked)

    def _planeSettingChanged(self, key, val):
        """
        Store a plane setting and refresh the slice-plane visual.
        """
        self._settings.updateSetting(key, val)
        self.showSlicePlane()

    def x0Changed(self, val):
        """
        x0 changed.
        """
        self._planeSettingChanged("x0", val)

    def y0Changed(self, val):
        """
        y0 changed.
        """
        self._planeSettingChanged("y0", val)

    def z0Changed(self, val):
        """
        z0 changed.
        """
        self._planeSettingChanged("z0", val)

    def xnChanged(self, val):
        """
        xn changed.
        """
        self._planeSettingChanged("xn", val)

    def ynChanged(self, val):
        """
        yn changed.
        """
        self._planeSettingChanged("yn", val)

    def znChanged(self, val):
        """
        zn changed.
        """
        self._planeSettingChanged("zn", val)

    def showSlicePlane(self):
        """
        Update position of slice plane.
        """
        if not self.showSlicePlaneChecked:
            return

        # args to pass
        p = (self._settings.getSetting("x0"), self._settings.getSetting("y0"), self._settings.getSetting("z0"))
        n = (self._settings.getSetting("xn"), self._settings.getSetting("yn"), self._settings.getSetting("zn"))

        # update actor
        self.slicePlane.update(p, n)

        # broadcast to renderers
        self.parent.filterTab.broadcastToRenderers("showSlicePlane", args=(self.slicePlane,))

    def hideSlicePlane(self):
        """
        Hide the slice plane.
        """
        # broadcast to renderers
        self.parent.filterTab.broadcastToRenderers("removeSlicePlane", globalBcast=True)

    def closeEvent(self, event):
        """
        Override closeEvent.
        """
        if self.showSlicePlaneChecked:
            # unchecking the box triggers showPlaneChanged -> hideSlicePlane
            self.showSlicePlaneCheck.setCheckState(QtCore.Qt.Unchecked)
            self.showSlicePlaneChecked = False

        self.hide()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
import time
import warnings
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe.convert import to_pcollection
from apache_beam.dataframe.frame_base import DeferredBase
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import interactive_runner as ir
from apache_beam.runners.interactive import pipeline_fragment as pf
from apache_beam.runners.interactive import pipeline_instrument as pi
from apache_beam.runners.interactive import utils
from apache_beam.runners.runner import PipelineState
# Module-level logger for the interactive recording machinery.
_LOGGER = logging.getLogger(__name__)
class ElementStream:
  """A stream of elements from a given PCollection.

  Lazily reads cached elements for one PCollection, bounded by a maximum
  element count and a maximum processing-time duration — whichever limiter
  triggers first ends the read.
  """
  def __init__(
      self,
      pcoll,  # type: beam.pvalue.PCollection
      var,  # type: str
      cache_key,  # type: str
      max_n,  # type: int
      max_duration_secs  # type: float
  ):
    self._pcoll = pcoll
    self._cache_key = cache_key
    self._pipeline = pcoll.pipeline
    self._var = var
    self._n = max_n
    self._duration_secs = max_duration_secs

    # A small state variable that when True, indicates that no more new
    # elements will be yielded if read() is called again.
    self._done = False

  @property
  def var(self):
    # type: () -> str
    """Returns the variable named that defined this PCollection."""
    return self._var

  @property
  def cache_key(self):
    # type: () -> str
    """Returns the cache key for this stream."""
    return self._cache_key

  def display_id(self, suffix):
    # type: (str) -> str
    """Returns a unique id able to be displayed in a web browser."""
    return utils.obfuscate(self._cache_key, suffix)

  def is_computed(self):
    # type: () -> bool
    """Returns True if no more elements will be recorded."""
    return self._pcoll in ie.current_env().computed_pcollections

  def is_done(self):
    # type: () -> bool
    """Returns True if no more new elements will be yielded."""
    return self._done

  def read(self, tail=True):
    # type: (bool) -> Any
    """Reads the elements currently recorded.

    Generator: yields decoded elements until a limiter triggers or the
    cache is exhausted. If the cache was exhausted while the pipeline was
    still running, calling read() again may yield new elements.
    """
    # Get the cache manager and wait until the file exists.
    cache_manager = ie.current_env().get_cache_manager(self._pipeline)

    # Retrieve the coder for the particular PCollection which will be used to
    # decode elements read from cache.
    coder = cache_manager.load_pcoder('full', self._cache_key)

    # Read the elements from the cache.
    # Import limiters here to prevent a circular import.
    from apache_beam.runners.interactive.options.capture_limiters import CountLimiter
    from apache_beam.runners.interactive.options.capture_limiters import ProcessingTimeLimiter
    reader, _ = cache_manager.read('full', self._cache_key, tail=tail)

    # Because a single TestStreamFileRecord can yield multiple elements, we
    # limit the count again here in the to_element_list call.
    #
    # There are two ways of exiting this loop either a limiter was triggered or
    # all elements from the cache were read. In the latter situation, it may be
    # the case that the pipeline was still running. Thus, another invocation of
    # `read` will yield new elements.
    count_limiter = CountLimiter(self._n)
    time_limiter = ProcessingTimeLimiter(self._duration_secs)
    limiters = (count_limiter, time_limiter)
    for e in utils.to_element_list(reader,
                                   coder,
                                   include_window_info=True,
                                   n=self._n,
                                   include_time_events=True):
      # From the to_element_list we either get TestStreamPayload.Events if
      # include_time_events or decoded elements from the reader. Make sure we
      # only count the decoded elements to break early.
      if isinstance(e, TestStreamPayload.Event):
        time_limiter.update(e)
      else:
        count_limiter.update(e)
        yield e

      if any(l.is_triggered() for l in limiters):
        break

    # A limiter being triggered means that we have fulfilled the user's
    # request. This implies that reading from the cache again won't yield any
    # new elements. WLOG, this applies to the user pipeline being terminated.
    if any(l.is_triggered()
           for l in limiters) or ie.current_env().is_terminated(self._pipeline):
      self._done = True
class Recording:
  """A group of PCollections from a given pipeline run.

  Wraps the PipelineResult of one run, exposes an ElementStream per recorded
  PCollection, and — on a daemon thread — cancels/joins the run and marks the
  PCollections computed once the run terminates successfully.
  """
  def __init__(
      self,
      user_pipeline,  # type: beam.Pipeline
      pcolls,  # type: List[beam.pvalue.PCollection]
      result,  # type: beam.runner.PipelineResult
      pipeline_instrument,  # type: beam.runners.interactive.PipelineInstrument
      max_n,  # type: int
      max_duration_secs,  # type: float
  ):
    self._user_pipeline = user_pipeline
    self._result = result
    # Guards self._result across this thread and _mark_all_computed.
    self._result_lock = threading.Lock()
    self._pcolls = pcolls
    pcoll_var = lambda pcoll: pipeline_instrument.cacheable_var_by_pcoll_id(
        pipeline_instrument.pcolls_to_pcoll_id.get(str(pcoll), None))

    self._streams = {
        pcoll: ElementStream(
            pcoll,
            pcoll_var(pcoll),
            pipeline_instrument.cache_key(pcoll),
            max_n,
            max_duration_secs)
        for pcoll in pcolls
    }
    self._start = time.time()
    self._duration_secs = max_duration_secs
    self._set_computed = bcj.is_cache_complete(str(id(user_pipeline)))

    # Run a separate thread for marking the PCollections done. This is because
    # the pipeline run may be asynchronous.
    self._mark_computed = threading.Thread(target=self._mark_all_computed)
    self._mark_computed.daemon = True
    self._mark_computed.start()

  def _mark_all_computed(self):
    # type: () -> None
    """Marks all the PCollections upon a successful pipeline run."""
    if not self._result:
      return

    while not PipelineState.is_terminal(self._result.state):
      with self._result_lock:
        # Renamed from `bcj`: the local previously shadowed the module-level
        # `bcj` import used elsewhere in this class.
        caching_job = ie.current_env().get_background_caching_job(
            self._user_pipeline)
        if caching_job and caching_job.is_done():
          self._result.wait_until_finish()

        elif time.time() - self._start >= self._duration_secs:
          self._result.cancel()
          self._result.wait_until_finish()

        elif all(s.is_done() for s in self._streams.values()):
          self._result.cancel()
          self._result.wait_until_finish()
      # Poll outside the lock so readers are not starved.
      time.sleep(0.1)

    # Mark the PCollection as computed so that Interactive Beam wouldn't need
    # to re-compute.
    if self._result.state is PipelineState.DONE and self._set_computed:
      ie.current_env().mark_pcollection_computed(self._pcolls)

  def is_computed(self):
    # type: () -> bool
    """Returns True if all PCollections are computed."""
    return all(s.is_computed() for s in self._streams.values())

  def stream(self, pcoll):
    # type: (beam.pvalue.PCollection) -> ElementStream
    """Returns an ElementStream for a given PCollection."""
    return self._streams[pcoll]

  def computed(self):
    # type: () -> dict[beam.pvalue.PCollection, ElementStream]
    """Returns all computed ElementStreams."""
    return {p: s for p, s in self._streams.items() if s.is_computed()}

  def uncomputed(self):
    # type: () -> dict[beam.pvalue.PCollection, ElementStream]
    """Returns all uncomputed ElementStreams."""
    return {p: s for p, s in self._streams.items() if not s.is_computed()}

  def cancel(self):
    # type: () -> None
    """Cancels the recording."""
    with self._result_lock:
      self._result.cancel()

  def wait_until_finish(self):
    # type: () -> None
    """Waits until the pipeline is done and returns the final state.

    This also marks any PCollections as computed right away if the pipeline is
    successful.
    """
    if not self._result:
      return beam.runners.runner.PipelineState.DONE

    # Joining the marker thread implies the underlying run has terminated.
    self._mark_computed.join()
    return self._result.state

  def describe(self):
    # type: () -> dict[str, int]
    """Returns a dictionary describing the cache and recording."""
    cache_manager = ie.current_env().get_cache_manager(self._user_pipeline)

    size = sum(
        cache_manager.size('full', s.cache_key) for s in self._streams.values())
    return {'size': size, 'duration': self._duration_secs}
class RecordingManager:
"""Manages recordings of PCollections for a given pipeline."""
def __init__(self, user_pipeline, pipeline_var=None, test_limiters=None):
# type: (beam.Pipeline, str, list[Limiter]) -> None
self.user_pipeline = user_pipeline # type: beam.Pipeline
self.pipeline_var = pipeline_var if pipeline_var else '' # type: str
self._recordings = set() # type: set[Recording]
self._start_time_sec = 0 # type: float
self._test_limiters = test_limiters if test_limiters else []
def _watch(self, pcolls):
    # type: (List[beam.pvalue.PCollection]) -> None
    """Watch any pcollections not being watched.

    This allows for the underlying caching layer to identify the PCollection
    as something to be cached.
    """
    seen_pcolls = set()
    seen_frames = set()
    for watching in ie.current_env().watching():
        for _, value in watching:
            if isinstance(value, beam.pvalue.PCollection):
                seen_pcolls.add(value)
            elif isinstance(value, DeferredBase):
                seen_frames.add(value)

    # Convert them one-by-one to generate a unique label for each. This allows
    # caching at a more fine-grained granularity.
    #
    # TODO(BEAM-12388): investigate the mixing pcollections in multiple
    # pipelines error when using the default label.
    for frame in seen_frames:
        seen_pcolls.add(
            to_pcollection(frame, yield_elements='pandas', label=str(frame._expr)))

    for pcoll in pcolls:
        if pcoll not in seen_pcolls:
            ie.current_env().watch(
                {'anonymous_pcollection_{}'.format(id(pcoll)): pcoll})
def _clear(self, pipeline_instrument):
    # type: (pi.PipelineInstrument) -> None
    """Clears the recording of all non-source PCollections."""
    cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)

    # Only clear the PCollections that aren't being populated from the
    # BackgroundCachingJob.
    computed = ie.current_env().computed_pcollections
    cacheables = [
        c for c in pipeline_instrument.cacheables.values()
        if c.pcoll.pipeline is self.user_pipeline and c.pcoll not in computed
    ]
    all_cached = set(str(c.to_key()) for c in cacheables)
    # Source (captured) cache entries are preserved; everything else goes.
    source_pcolls = getattr(cache_manager, 'capture_keys', set())
    to_clear = all_cached - source_pcolls
    self._clear_pcolls(cache_manager, set(to_clear))
def _clear_pcolls(self, cache_manager, pcolls):
for pc in pcolls:
cache_manager.clear('full', pc)
def clear(self):
    # type: () -> None
    """Clears all cached PCollections for this RecordingManager."""
    manager = ie.current_env().get_cache_manager(self.user_pipeline)
    if manager:
        manager.cleanup()
def cancel(self):
    # type: (None) -> None
    """Cancels the current background recording job and all recordings."""
    bcj.attempt_to_cancel_background_caching_job(self.user_pipeline)
    for recording in self._recordings:
        recording.wait_until_finish()
    self._recordings = set()

    # The recordings rely on a reference to the BCJ to correctly finish, so
    # evict the BCJ only after they have all completed.
    ie.current_env().evict_background_caching_job(self.user_pipeline)
def describe(self):
    # type: () -> Dict[str, Any]
    """Returns a dictionary describing the cache and recording.

    Keys: 'size' (bytes recorded plus source capture size), 'start'
    (recording start time in seconds), 'state' (background job state, or
    PipelineState.STOPPED when there is no background job), and
    'pipeline_var'.
    """
    cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
    # Cache managers that capture sources expose 'capture_size'; count it
    # toward the reported total.
    capture_size = getattr(cache_manager, 'capture_size', 0)
    descriptions = [r.describe() for r in self._recordings]
    size = sum(d['size'] for d in descriptions) + capture_size
    start = self._start_time_sec
    # NOTE(review): this local 'bcj' shadows the module alias 'bcj' used by
    # cancel()/record_pipeline(); harmless here but easy to misread.
    bcj = ie.current_env().get_background_caching_job(self.user_pipeline)
    if bcj:
        state = bcj.state
    else:
        state = PipelineState.STOPPED
    return {
        'size': size,
        'start': start,
        'state': state,
        'pipeline_var': self.pipeline_var
    }
def record_pipeline(self):
    # type: () -> bool
    """Starts a background caching job for this RecordingManager's pipeline.

    Returns:
      True iff a new background caching job was started (in which case the
      recording start time is reset); False otherwise.
    """
    runner = self.user_pipeline.runner
    # Unwrap the InteractiveRunner so the background job runs on the
    # underlying (real) runner.
    if isinstance(runner, ir.InteractiveRunner):
        runner = runner._underlying_runner

    # Make sure that sources without a user reference are still cached.
    ie.current_env().add_user_pipeline(self.user_pipeline)
    pi.watch_sources(self.user_pipeline)

    # Attempt to run background caching job to record any sources.
    if ie.current_env().is_in_ipython:
        # Accessing pipeline.options emits a DeprecationWarning; silence it
        # so it does not spam the notebook output.
        warnings.filterwarnings(
            'ignore',
            'options is deprecated since First stable release. References to '
            '<pipeline>.options will not be supported',
            category=DeprecationWarning)
    if bcj.attempt_to_run_background_caching_job(
        runner,
        self.user_pipeline,
        options=self.user_pipeline.options,
        limiters=self._test_limiters):
        self._start_time_sec = time.time()
        return True
    return False
def record(self, pcolls, max_n, max_duration):
    # type: (List[beam.pvalue.PCollection], int, Union[int, str]) -> Recording
    """Records the given PCollections.

    Args:
      pcolls: PCollections to record; each must belong to this manager's
        user_pipeline.
      max_n: maximum number of elements to record.
      max_duration: maximum recording duration; either a number of seconds
        or a pandas-style duration string (the literal 'inf' is passed
        through unconverted).
    """
    # Assert that all PCollection come from the same user_pipeline.
    for pcoll in pcolls:
        assert pcoll.pipeline is self.user_pipeline, (
            '{} belongs to a different user-defined pipeline ({}) than that of'
            ' other PCollections ({}).'.format(
                pcoll, pcoll.pipeline, self.user_pipeline))

    # Duration strings like '60s' are converted to seconds; numbers and the
    # sentinel string 'inf' are forwarded as-is.
    if isinstance(max_duration, str) and max_duration != 'inf':
        max_duration_secs = pd.to_timedelta(max_duration).total_seconds()
    else:
        max_duration_secs = max_duration

    # Make sure that all PCollections to be shown are watched. If a PCollection
    # has not been watched, make up a variable name for that PCollection and
    # watch it. No validation is needed here because the watch logic can handle
    # arbitrary variables.
    self._watch(pcolls)
    pipeline_instrument = pi.PipelineInstrument(self.user_pipeline)
    self.record_pipeline()

    # Get the subset of computed PCollections. These do not need to be
    # recomputed.
    computed_pcolls = set(
        pcoll for pcoll in pcolls
        if pcoll in ie.current_env().computed_pcollections)

    # Start a pipeline fragment to start computing the PCollections.
    uncomputed_pcolls = set(pcolls).difference(computed_pcolls)
    if uncomputed_pcolls:
        # Clear the cache of the given uncomputed PCollections because they are
        # incomplete.
        self._clear(pipeline_instrument)

        # Accessing pipeline.options below emits a DeprecationWarning;
        # silence it.
        warnings.filterwarnings(
            'ignore',
            'options is deprecated since First stable release. References to '
            '<pipeline>.options will not be supported',
            category=DeprecationWarning)
        pf.PipelineFragment(list(uncomputed_pcolls),
                            self.user_pipeline.options).run()
        result = ie.current_env().pipeline_result(self.user_pipeline)
    else:
        result = None

    recording = Recording(
        self.user_pipeline,
        pcolls,
        result,
        pipeline_instrument,
        max_n,
        max_duration_secs)
    self._recordings.add(recording)

    return recording
|
|
#!/usr/bin/env python
''' unit test suite for multi-record set features of rdbhost, and the nextset
cursor method of rdbhdb.
'''
import unittest
import time
import sys, os
import accounts
# Make the driver package importable from the sibling 'lib' directory.
# os.path.join keeps this portable: the old literal '..\lib' only worked on
# Windows (on POSIX it names a directory literally called '..\lib').
sys.path.insert(0, os.path.join('..', 'lib'))
from rdbhdb import rdbhdb
need_version = '0.11.0'
class test_nextset(unittest.TestCase):
    """Unit tests for multi-record-set queries and cursor.nextset() in rdbhdb."""

    driver = rdbhdb

    # get choice of server from environment
    HOST = os.environ.get('RDBHOST_TEST', "dev.rdbhost.com").strip("'")

    connect_args = ()  # positional args for driver.connect()
    connect_kw_args = {
        'role': accounts.demo['role'],
        'authcode': accounts.demo['authcode'],
        'host': HOST}

    lower_func = 'lower'  # For stored procedure test
    table_prefix = 'extras_'  # If you need to specify a prefix for tables

    ddl1 = '''CREATE TABLE %scities (name varchar(80));''' % table_prefix
    ddl2 = '''CREATE TABLE %sstates (name varchar(80));''' % table_prefix
    ddl3 = '''CREATE TABLE %sTest (value) AS SELECT * FROM generate_series(0, 509);''' % table_prefix
    xddl1 = 'drop table %scities' % table_prefix
    xddl2 = 'drop table %sstates' % table_prefix
    xddl3 = 'drop table %sTest' % table_prefix
    # The dummy table is created as a side effect of the multi-statement
    # queries below, so it must be dropped during teardown as well.
    xddl4 = 'drop table %sdummy' % table_prefix

    lowerfunc = 'lower'  # Name of stored procedure to convert string->lowercase

    # Some drivers may need to override these helpers, for example adding
    # a 'commit' after the execute.
    def executeDDL1(self, cursor):
        cursor.execute(self.ddl1)

    def executeDDL2(self, cursor):
        cursor.execute(self.ddl2)

    def executeDDL3(self, cursor):
        cursor.execute(self.ddl3)

    def setUp(self):
        """Abort the whole run early when the server cannot be reached."""
        try:
            con = self._connect()
            con.close()
        except Exception as e:
            print('connection not made. %s db must be created online.' % e.args[0])
            sys.exit(2)

    def tearDown(self):
        ''' self.drivers should override this method to perform required cleanup
            if any is necessary, such as deleting the test database.
            The default drops the tables that may be created.
        '''
        con = self._connect()
        try:
            cur = con.cursor()
            for ddl in (self.xddl1, self.xddl2, self.xddl3, self.xddl4):
                try:
                    cur.execute(ddl)
                    con.commit()
                except self.driver.Error:
                    # Assume table didn't exist. Other tests will check if
                    # execute is busted.
                    pass
        finally:
            con.close()

    def _connect(self):
        """Open a fresh connection using the class's connect arguments."""
        try:
            return self.driver.connect(
                *self.connect_args, **self.connect_kw_args
            )
        except AttributeError:
            self.fail("No connect method found in self.driver module")

    def test0_host(self):
        """Log which server the suite is talking to."""
        print('using SERVER', self.HOST, file=sys.stderr)

    def test1_version(self):
        """Require driver version >= need_version, comparing numerically.

        A plain string comparison is wrong once any component reaches two
        digits (e.g. '0.9.0' > '0.11.0' lexically), so compare tuples of the
        leading integer part of each dotted component instead.
        """
        def as_tuple(version):
            parts = []
            for piece in version.split('.'):
                digits = ''
                for ch in piece:
                    if ch.isdigit():
                        digits += ch
                    else:
                        break
                parts.append(int(digits) if digits else 0)
            return tuple(parts)
        self.assertTrue(
            as_tuple(rdbhdb.__version__) >= as_tuple(need_version),
            rdbhdb.__version__)

    # Fixture rows inserted into the cities table, in insertion order.
    samples = [
        'Atlanta',
        'Boston',
        'Chicago',
        'Houston',
        'Madison',
        'Memphis'
    ]

    def _populate(self):
        ''' Return a list of sql commands to setup the DB for the fetch
            tests.
        '''
        populate = [
            "insert into %scities values ('%s')" % (self.table_prefix, s)
            for s in self.samples
        ]
        return populate

    def test_nextset(self):
        """Walk through all three record sets of a multi-statement query."""
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            self.executeDDL2(cur)
            for sql in self._populate():
                cur.execute(sql)
            q = '''select name from %scities;
                   create table %sdummy();
                   select name from %sstates;''' % \
                ((self.table_prefix, ) * 3)
            cur.execute(q)

            # first (default) recordset
            r = cur.fetchall()
            self.assertEqual(len(r), 6,
                             'cursor.fetchall retrieved incorrect number of rows ')
            self.assertTrue(cur.rowcount in (-1, 6))
            # Make sure we get the right data back out
            for i in range(0, 6):
                self.assertEqual(str(r[i][0]), str(self.samples[i]),
                                 'incorrect data retrieved by cursor.fetchall %s %s' %
                                 (r[i], self.samples[i]))

            # next recordset (no data)
            s = cur.nextset()
            self.assertTrue(s, 'nextset return false neg')
            self.assertRaises(rdbhdb.Error, cur.fetchall)
            self.assertTrue(cur.rowcount == -1, cur.rowcount)
            self.assertTrue(not cur._records)

            # next recordset (0 recs)
            s = cur.nextset()
            self.assertTrue(s, 'nextset return false neg')
            r = cur.fetchall()
            self.assertTrue(cur.rowcount == 0, cur.rowcount)
            self.assertTrue(len(cur._records) == 0)

            # no more recordsets
            s = cur.nextset()
            self.assertTrue(not s, 'nextset return false pos at end of data')
            self.assertRaises(rdbhdb.Error, cur.fetchall)
            self.assertTrue(cur.rowcount == -1, cur.rowcount)
            self.assertTrue(not cur._records)
        finally:
            con.close()

    def test_limits(self):
        """Verify fetchmany() paging across record sets with a large table."""
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            self.executeDDL2(cur)
            for sql in self._populate():
                cur.execute(sql)
            self.executeDDL3(cur)
            q = '''select name from %scities;
                   create table %sdummy();
                   select value, '0' from %sTest;''' % \
                ((self.table_prefix, ) * 3)
            cur.execute(q)

            # first (default) recordset
            r = cur.fetchall()
            self.assertEqual(len(r), 6,
                             'cursor.fetchall retrieved incorrect number of rows %s 6' %
                             len(r))
            self.assertTrue(cur.rowcount in (-1, 6))

            # next recordset (no data)
            s = cur.nextset()
            self.assertTrue(s, 'nextset return false neg')
            self.assertRaises(rdbhdb.Error, cur.fetchall)
            self.assertTrue(cur.rowcount == -1, cur.rowcount)
            self.assertTrue(not cur._records)

            # next recordset (100-6 recs)
            s = cur.nextset()
            self.assertTrue(s, 'nextset return false neg')
            r = cur.fetchmany(100 - 6)
            self.assertTrue(cur.rowcount >= 100 - 6, cur.rowcount)
            self.assertTrue(len(r) == 100 - 6, len(r))

            # no more recordsets
            s = cur.nextset()
            self.assertTrue(not s, 'nextset return false pos at end of data')
            self.assertRaises(rdbhdb.Error, cur.fetchall)
            self.assertTrue(cur.rowcount == -1, cur.rowcount)
            self.assertTrue(not cur._records)
        finally:
            con.close()
class test_nextset_ws(test_nextset):
    """Re-run the whole nextset suite over a websocket connection."""

    # Same credentials/host as the base class, plus the websocket switch.
    connect_kw_args = dict(
        test_nextset.connect_kw_args, useWebsocket=True)
if __name__ == '__main__':
    # Run the whole module's test suite when invoked as a script.
    unittest.main()
|
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import mock
import os
import unittest
from mkdocs import nav, legacy
from mkdocs.exceptions import ConfigurationError
from mkdocs.tests.base import dedent
class SiteNavigationTests(unittest.TestCase):
    """Tests for nav.SiteNavigation: TOC rendering, URL generation, walking.

    NOTE(review): the indentation inside the dedent() fixtures was lost in a
    whitespace-mangled paste and has been reconstructed assuming nav renders
    nested items indented four spaces per level -- confirm against the actual
    str(SiteNavigation) output.
    """

    def test_simple_toc(self):
        """Two flat pages produce two nav items and two pages."""
        pages = [
            {'Home': 'index.md'},
            {'About': 'about.md'}
        ]
        expected = dedent("""
        Home - /
        About - /about/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 2)
        self.assertEqual(len(site_navigation.pages), 2)

    def test_empty_toc_item(self):
        """A bare filename entry gets a title inferred ('Home' for index.md)."""
        pages = [
            'index.md',
            {'About': 'about.md'}
        ]
        expected = dedent("""
        Home - /
        About - /about/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 2)
        self.assertEqual(len(site_navigation.pages), 2)

    def test_indented_toc(self):
        """Grouped entries render as indented children under a header."""
        pages = [
            {'Home': 'index.md'},
            {'API Guide': [
                {'Running': 'api-guide/running.md'},
                {'Testing': 'api-guide/testing.md'},
                {'Debugging': 'api-guide/debugging.md'},
            ]},
            {'About': [
                {'Release notes': 'about/release-notes.md'},
                {'License': 'about/license.md'}
            ]}
        ]
        expected = dedent("""
        Home - /
        API Guide
            Running - /api-guide/running/
            Testing - /api-guide/testing/
            Debugging - /api-guide/debugging/
        About
            Release notes - /about/release-notes/
            License - /about/license/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 3)
        self.assertEqual(len(site_navigation.pages), 6)

    def test_nested_ungrouped(self):
        """Titled entries in subdirectories stay flat in the nav."""
        pages = [
            {'Home': 'index.md'},
            {'Contact': 'about/contact.md'},
            {'License Title': 'about/sub/license.md'},
        ]
        expected = dedent("""
        Home - /
        Contact - /about/contact/
        License Title - /about/sub/license/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 3)
        self.assertEqual(len(site_navigation.pages), 3)

    def test_nested_ungrouped_no_titles(self):
        """Untitled entries in subdirectories get titles from filenames."""
        pages = [
            'index.md',
            'about/contact.md',
            'about/sub/license.md'
        ]
        expected = dedent("""
        Home - /
        Contact - /about/contact/
        License - /about/sub/license/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 3)
        self.assertEqual(len(site_navigation.pages), 3)

    # Patch os.path.sep so backslash paths behave as on Windows.
    @mock.patch.object(os.path, 'sep', '\\')
    def test_nested_ungrouped_no_titles_windows(self):
        """Same as above, with Windows-style path separators."""
        pages = [
            'index.md',
            'about\\contact.md',
            'about\\sub\\license.md',
        ]
        expected = dedent("""
        Home - /
        Contact - /about/contact/
        License - /about/sub/license/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 3)
        self.assertEqual(len(site_navigation.pages), 3)

    def test_walk_simple_toc(self):
        """walk_pages() marks the current page with [*] one page at a time."""
        pages = [
            {'Home': 'index.md'},
            {'About': 'about.md'}
        ]
        expected = [
            dedent("""
            Home - / [*]
            About - /about/
            """),
            dedent("""
            Home - /
            About - /about/ [*]
            """)
        ]
        site_navigation = nav.SiteNavigation(pages)
        for index, page in enumerate(site_navigation.walk_pages()):
            self.assertEqual(str(site_navigation).strip(), expected[index])

    def test_walk_empty_toc(self):
        """walk_pages() also works when titles are inferred."""
        pages = [
            'index.md',
            {'About': 'about.md'}
        ]
        expected = [
            dedent("""
            Home - / [*]
            About - /about/
            """),
            dedent("""
            Home - /
            About - /about/ [*]
            """)
        ]
        site_navigation = nav.SiteNavigation(pages)
        for index, page in enumerate(site_navigation.walk_pages()):
            self.assertEqual(str(site_navigation).strip(), expected[index])

    def test_walk_indented_toc(self):
        """Walking nested pages marks both the active group and active page."""
        pages = [
            {'Home': 'index.md'},
            {'API Guide': [
                {'Running': 'api-guide/running.md'},
                {'Testing': 'api-guide/testing.md'},
                {'Debugging': 'api-guide/debugging.md'},
            ]},
            {'About': [
                {'Release notes': 'about/release-notes.md'},
                {'License': 'about/license.md'}
            ]}
        ]
        expected = [
            dedent("""
            Home - / [*]
            API Guide
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide [*]
                Running - /api-guide/running/ [*]
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide [*]
                Running - /api-guide/running/
                Testing - /api-guide/testing/ [*]
                Debugging - /api-guide/debugging/
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide [*]
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/ [*]
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About [*]
                Release notes - /about/release-notes/ [*]
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About [*]
                Release notes - /about/release-notes/
                License - /about/license/ [*]
            """)
        ]
        site_navigation = nav.SiteNavigation(pages)
        for index, page in enumerate(site_navigation.walk_pages()):
            self.assertEqual(str(site_navigation).strip(), expected[index])

    def test_base_url(self):
        """With use_directory_urls off, '/' resolves to the current dir."""
        pages = [
            'index.md'
        ]
        site_navigation = nav.SiteNavigation(pages, use_directory_urls=False)
        base_url = site_navigation.url_context.make_relative('/')
        self.assertEqual(base_url, '.')

    def test_relative_md_links_have_slash(self):
        """Relative links between sibling directories keep trailing slashes."""
        pages = [
            'index.md',
            'user-guide/styling-your-docs.md'
        ]
        site_navigation = nav.SiteNavigation(pages, use_directory_urls=False)
        site_navigation.url_context.base_path = "/user-guide/configuration"
        url = site_navigation.url_context.make_relative('/user-guide/styling-your-docs/')
        self.assertEqual(url, '../styling-your-docs/')

    def test_generate_site_navigation(self):
        """
        Verify inferring page titles based on the filename
        """
        pages = [
            'index.md',
            'api-guide/running.md',
            'about/notes.md',
            'about/sub/license.md',
        ]
        url_context = nav.URLContext()
        nav_items, pages = nav._generate_site_navigation(pages, url_context)

        self.assertEqual([n.title for n in nav_items],
                         ['Home', 'Running', 'Notes', 'License'])
        self.assertEqual([n.url for n in nav_items], [
            '.',
            'api-guide/running/',
            'about/notes/',
            'about/sub/license/'
        ])
        self.assertEqual([p.title for p in pages],
                         ['Home', 'Running', 'Notes', 'License'])

    # Patch os.path.sep so backslash paths behave as on Windows.
    @mock.patch.object(os.path, 'sep', '\\')
    def test_generate_site_navigation_windows(self):
        """
        Verify inferring page titles based on the filename with a windows path
        """
        pages = [
            'index.md',
            'api-guide\\running.md',
            'about\\notes.md',
            'about\\sub\\license.md',
        ]
        url_context = nav.URLContext()
        nav_items, pages = nav._generate_site_navigation(pages, url_context)

        self.assertEqual([n.title for n in nav_items],
                         ['Home', 'Running', 'Notes', 'License'])
        self.assertEqual([n.url for n in nav_items], [
            '.',
            'api-guide/running/',
            'about/notes/',
            'about/sub/license/'
        ])
        self.assertEqual([p.title for p in pages],
                         ['Home', 'Running', 'Notes', 'License'])

    def test_force_abs_urls(self):
        """
        Verify force absolute URLs
        """
        pages = [
            'index.md',
            'api-guide/running.md',
            'about/notes.md',
            'about/sub/license.md',
        ]
        url_context = nav.URLContext()
        url_context.force_abs_urls = True
        nav_items, pages = nav._generate_site_navigation(pages, url_context)

        self.assertEqual([n.url for n in nav_items], [
            '/',
            '/api-guide/running/',
            '/about/notes/',
            '/about/sub/license/'
        ])

    def test_force_abs_urls_with_base(self):
        """
        Verify force absolute URLs
        """
        pages = [
            'index.md',
            'api-guide/running.md',
            'about/notes.md',
            'about/sub/license.md',
        ]
        url_context = nav.URLContext()
        url_context.force_abs_urls = True
        url_context.base_path = '/foo/'
        nav_items, pages = nav._generate_site_navigation(pages, url_context)

        self.assertEqual([n.url for n in nav_items], [
            '/foo/',
            '/foo/api-guide/running/',
            '/foo/about/notes/',
            '/foo/about/sub/license/'
        ])

    def test_invalid_pages_config(self):
        """Malformed page entries raise ConfigurationError."""
        bad_pages = [
            set(),  # should be dict or string only
            {"a": "index.md", "b": "index.md"}  # extra key
        ]

        for bad_page in bad_pages:

            def _test():
                return nav._generate_site_navigation((bad_page, ), None)

            self.assertRaises(ConfigurationError, _test)

    def test_pages_config(self):
        """An empty dict entry raises ConfigurationError."""
        bad_page = {}  # empty

        def _test():
            return nav._generate_site_navigation((bad_page, ), None)

        self.assertRaises(ConfigurationError, _test)

    def test_ancestors(self):
        """Each page's ancestors chain reflects its nav nesting."""
        pages = [
            {'Home': 'index.md'},
            {'API Guide': [
                {'Running': 'api-guide/running.md'},
                {'Testing': 'api-guide/testing.md'},
                {'Debugging': 'api-guide/debugging.md'},
                {'Advanced': [
                    {'Part 1': 'api-guide/advanced/part-1.md'},
                ]},
            ]},
            {'About': [
                {'Release notes': 'about/release-notes.md'},
                {'License': 'about/license.md'}
            ]}
        ]
        site_navigation = nav.SiteNavigation(pages)
        ancestors = (
            [],
            [site_navigation.nav_items[1]],
            [site_navigation.nav_items[1]],
            [site_navigation.nav_items[1]],
            # The 'Part 1' page is nested twice; reuse its own deepest
            # ancestor to name the 'Advanced' header.
            [site_navigation.nav_items[1],
             site_navigation.pages[4].ancestors[-1]],
            [site_navigation.nav_items[2]],
            [site_navigation.nav_items[2]],
        )
        self.assertEqual(len(site_navigation.pages), len(ancestors))
        for i, (page, expected_ancestor) in enumerate(
                zip(site_navigation.pages, ancestors)):
            self.assertEqual(page.ancestors, expected_ancestor,
                             "Failed on ancestor test {0}".format(i))

    def test_nesting(self):
        """Three levels of grouping render with increasing indentation."""
        pages_config = [
            {'Home': 'index.md'},
            {'Install': [
                {'Pre-install': 'install/install-pre.md'},
                {'The install': 'install/install-actual.md'},
                {'Post install': 'install/install-post.md'},
            ]},
            {'Guide': [
                {'Tutorial': [
                    {'Getting Started': 'guide/tutorial/running.md'},
                    {'Advanced Features': 'guide/tutorial/testing.md'},
                    {'Further Reading': 'guide/tutorial/debugging.md'},
                ]},
                {'API Reference': [
                    {'Feature 1': 'guide/api-ref/running.md'},
                    {'Feature 2': 'guide/api-ref/testing.md'},
                    {'Feature 3': 'guide/api-ref/debugging.md'},
                ]},
                {'Testing': 'guide/testing.md'},
                {'Deploying': 'guide/deploying.md'},
            ]}
        ]

        site_navigation = nav.SiteNavigation(pages_config)

        self.assertEqual([n.title for n in site_navigation.nav_items],
                         ['Home', 'Install', 'Guide'])
        self.assertEqual(len(site_navigation.pages), 12)

        expected = dedent("""
        Home - /
        Install
            Pre-install - /install/install-pre/
            The install - /install/install-actual/
            Post install - /install/install-post/
        Guide
            Tutorial
                Getting Started - /guide/tutorial/running/
                Advanced Features - /guide/tutorial/testing/
                Further Reading - /guide/tutorial/debugging/
            API Reference
                Feature 1 - /guide/api-ref/running/
                Feature 2 - /guide/api-ref/testing/
                Feature 3 - /guide/api-ref/debugging/
            Testing - /guide/testing/
            Deploying - /guide/deploying/
        """)

        self.maxDiff = None
        self.assertEqual(str(site_navigation).strip(), expected)
class TestLegacyPagesConfig(unittest.TestCase):
    """Tests that the legacy tuple-based pages config, converted through
    legacy.pages_compat_shim, behaves like the modern dict-based config.

    NOTE(review): dedent() fixture indentation reconstructed (four spaces per
    nesting level) after a whitespace-mangled paste -- verify against the
    actual str(SiteNavigation) output.
    """

    def test_walk_simple_toc(self):
        """Walking two shimmed pages marks each as current in turn."""
        pages = legacy.pages_compat_shim([
            ('index.md', 'Home'),
            ('about.md', 'About')
        ])

        expected = [
            dedent("""
            Home - / [*]
            About - /about/
            """),
            dedent("""
            Home - /
            About - /about/ [*]
            """)
        ]
        site_navigation = nav.SiteNavigation(pages)
        for index, page in enumerate(site_navigation.walk_pages()):
            self.assertEqual(str(site_navigation).strip(), expected[index])

    def test_walk_empty_toc(self):
        """A single-element tuple (no title) gets its title inferred."""
        pages = legacy.pages_compat_shim([
            ('index.md',),
            ('about.md', 'About')
        ])

        expected = [
            dedent("""
            Home - / [*]
            About - /about/
            """),
            dedent("""
            Home - /
            About - /about/ [*]
            """)
        ]

        site_navigation = nav.SiteNavigation(pages)
        for index, page in enumerate(site_navigation.walk_pages()):
            self.assertEqual(str(site_navigation).strip(), expected[index])

    def test_walk_indented_toc(self):
        """Three-element tuples (path, group, title) produce nested nav."""
        pages = legacy.pages_compat_shim([
            ('index.md', 'Home'),
            ('api-guide/running.md', 'API Guide', 'Running'),
            ('api-guide/testing.md', 'API Guide', 'Testing'),
            ('api-guide/debugging.md', 'API Guide', 'Debugging'),
            ('about/release-notes.md', 'About', 'Release notes'),
            ('about/license.md', 'About', 'License')
        ])

        expected = [
            dedent("""
            Home - / [*]
            API Guide
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide [*]
                Running - /api-guide/running/ [*]
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide [*]
                Running - /api-guide/running/
                Testing - /api-guide/testing/ [*]
                Debugging - /api-guide/debugging/
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide [*]
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/ [*]
            About
                Release notes - /about/release-notes/
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About [*]
                Release notes - /about/release-notes/ [*]
                License - /about/license/
            """),
            dedent("""
            Home - /
            API Guide
                Running - /api-guide/running/
                Testing - /api-guide/testing/
                Debugging - /api-guide/debugging/
            About [*]
                Release notes - /about/release-notes/
                License - /about/license/ [*]
            """)
        ]

        site_navigation = nav.SiteNavigation(pages)
        for index, page in enumerate(site_navigation.walk_pages()):
            self.assertEqual(str(site_navigation).strip(), expected[index])

    def test_indented_toc_missing_child_title(self):
        """A grouped entry without its own title falls back to the filename."""
        pages = legacy.pages_compat_shim([
            ('index.md', 'Home'),
            ('api-guide/running.md', 'API Guide', 'Running'),
            ('api-guide/testing.md', 'API Guide'),
            ('api-guide/debugging.md', 'API Guide', 'Debugging'),
            ('about/release-notes.md', 'About', 'Release notes'),
            ('about/license.md', 'About', 'License')
        ])

        expected = dedent("""
        Home - /
        API Guide
            Running - /api-guide/running/
            Testing - /api-guide/testing/
            Debugging - /api-guide/debugging/
        About
            Release notes - /about/release-notes/
            License - /about/license/
        """)
        site_navigation = nav.SiteNavigation(pages)
        self.assertEqual(str(site_navigation).strip(), expected)
        self.assertEqual(len(site_navigation.nav_items), 3)
        self.assertEqual(len(site_navigation.pages), 6)

    def test_edit_uri(self):
        """
        Ensure that set_edit_url creates well formed URLs for edit_uri
        """
        pages = [
            'index.md',
            'internal.md',
            'sub/internal.md',
            'sub1/sub2/internal.md',
        ]

        # Basic test
        repo_url = 'http://example.com/'
        edit_uri = 'edit/master/docs/'
        site_navigation = nav.SiteNavigation(pages)
        expected_results = (
            repo_url + edit_uri + pages[0],
            repo_url + edit_uri + pages[1],
            repo_url + edit_uri + pages[2],
            repo_url + edit_uri + pages[3],
        )
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

        # Ensure the '/' is added to the repo_url and edit_uri
        repo_url = 'http://example.com'
        edit_uri = 'edit/master/docs'
        site_navigation = nav.SiteNavigation(pages)
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

        # Ensure query strings are supported
        repo_url = 'http://example.com'
        edit_uri = '?query=edit/master/docs/'
        site_navigation = nav.SiteNavigation(pages)
        expected_results = (
            repo_url + edit_uri + pages[0],
            repo_url + edit_uri + pages[1],
            repo_url + edit_uri + pages[2],
            repo_url + edit_uri + pages[3],
        )
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

        # Ensure fragment strings are supported
        repo_url = 'http://example.com'
        edit_uri = '#fragment/edit/master/docs/'
        site_navigation = nav.SiteNavigation(pages)
        expected_results = (
            repo_url + edit_uri + pages[0],
            repo_url + edit_uri + pages[1],
            repo_url + edit_uri + pages[2],
            repo_url + edit_uri + pages[3],
        )
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

    def test_edit_uri_windows(self):
        """
        Ensure that set_edit_url creates well formed URLs for edit_uri with a windows path
        """
        pages = [
            'index.md',
            'internal.md',
            'sub\\internal.md',
            'sub1\\sub2\\internal.md',
        ]

        # Basic test
        repo_url = 'http://example.com/'
        edit_uri = 'edit/master/docs/'
        site_navigation = nav.SiteNavigation(pages)
        expected_results = (
            repo_url + edit_uri + pages[0],
            repo_url + edit_uri + pages[1],
            repo_url + edit_uri + pages[2].replace('\\', '/'),
            repo_url + edit_uri + pages[3].replace('\\', '/'),
        )
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

        # Ensure the '/' is added to the repo_url and edit_uri
        repo_url = 'http://example.com'
        edit_uri = 'edit/master/docs'
        site_navigation = nav.SiteNavigation(pages)
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

        # Ensure query strings are supported
        repo_url = 'http://example.com'
        edit_uri = '?query=edit/master/docs/'
        site_navigation = nav.SiteNavigation(pages)
        expected_results = (
            repo_url + edit_uri + pages[0],
            repo_url + edit_uri + pages[1],
            repo_url + edit_uri + pages[2].replace('\\', '/'),
            repo_url + edit_uri + pages[3].replace('\\', '/'),
        )
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])

        # Ensure fragment strings are supported
        repo_url = 'http://example.com'
        edit_uri = '#fragment/edit/master/docs/'
        site_navigation = nav.SiteNavigation(pages)
        expected_results = (
            repo_url + edit_uri + pages[0],
            repo_url + edit_uri + pages[1],
            repo_url + edit_uri + pages[2].replace('\\', '/'),
            repo_url + edit_uri + pages[3].replace('\\', '/'),
        )
        for idx, page in enumerate(site_navigation.walk_pages()):
            page.set_edit_url(repo_url, edit_uri)
            self.assertEqual(page.edit_url, expected_results[idx])
|
|
# Copyright 2009-Present The Graphite Development Team
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = aggregationType,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
import itertools
import operator
import os
import platform
import re
import struct
import sys
import time
# Python 2/3 compatibility: prefer the lazy itertools variants when they
# exist (py2); fall back to the builtins (py3).
izip = getattr(itertools, 'izip', zip)
ifilter = getattr(itertools, 'ifilter', filter)
if sys.version_info >= (3, 0):
  xrange = range

# Feature detection: file locking needs fcntl (POSIX only).
try:
  import fcntl
  CAN_LOCK = True
except ImportError:
  CAN_LOCK = False

# Feature detection: file preallocation via ctypes/posix_fallocate.
try:
  import ctypes
  import ctypes.util
  CAN_FALLOCATE = True
except ImportError:
  CAN_FALLOCATE = False

# Feature detection: posix_fadvise lives in os on py3; the third-party
# 'fadvise' package provides it on py2.
try:
  if sys.version_info >= (3, 0):
    from os import posix_fadvise, POSIX_FADV_RANDOM
  else:
    from fadvise import posix_fadvise, POSIX_FADV_RANDOM
  CAN_FADVISE = True
except ImportError:
  CAN_FADVISE = False
# Bind libc's posix_fallocate(64) through ctypes; 'fallocate' stays None when
# no usable symbol is found.
fallocate = None

if CAN_FALLOCATE:
  libc_name = ctypes.util.find_library('c')
  libc = ctypes.CDLL(libc_name)
  c_off64_t = ctypes.c_int64
  c_off_t = ctypes.c_int

  if platform.uname()[0] == 'FreeBSD':
    # offset type is 64-bit on FreeBSD 32-bit & 64-bit platforms to address files more than 2GB
    c_off_t = ctypes.c_int64

  # Prefer the explicit 64-bit entry point; fall back to the plain one.
  try:
    _fallocate = libc.posix_fallocate64
    _fallocate.restype = ctypes.c_int
    _fallocate.argtypes = [ctypes.c_int, c_off64_t, c_off64_t]
  except AttributeError:
    try:
      _fallocate = libc.posix_fallocate
      _fallocate.restype = ctypes.c_int
      _fallocate.argtypes = [ctypes.c_int, c_off_t, c_off_t]
    except AttributeError:
      CAN_FALLOCATE = False

  if CAN_FALLOCATE:
    def _py_fallocate(fd, offset, len_):
      # posix_fallocate returns an errno-style code instead of setting errno.
      res = _fallocate(fd.fileno(), offset, len_)
      if res != 0:
        raise IOError(res, 'fallocate')
    fallocate = _py_fallocate
  del libc
  del libc_name
# Module-level switches. Presumably toggled by embedding applications; their
# effects are applied elsewhere in this module (not visible in this chunk).
LOCK = False
CACHE_HEADERS = False
AUTOFLUSH = False
FADVISE_RANDOM = False
# Buffering setting applied to all operations that do *not* require
# a full scan of the file in order to minimize cache thrashing.
BUFFERING = 0

# Header cache used when CACHE_HEADERS is enabled (see __readHeader).
__headerCache = {}

# struct format strings for the on-disk layout (all big-endian; see the
# file-layout description at the top of this module).
longFormat = "!L"                   # unsigned 32-bit
longSize = struct.calcsize(longFormat)
floatFormat = "!f"                  # 32-bit float
floatSize = struct.calcsize(floatFormat)
valueFormat = "!d"                  # 64-bit double (a point's value)
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"                 # Point = timestamp, value
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"            # aggregationType, maxRetention, xFilesFactor, archiveCount
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"           # offset, secondsPerPoint, points
archiveInfoSize = struct.calcsize(archiveInfoFormat)

# Numeric codes stored in the header <-> aggregation method names.
aggregationTypeToMethod = dict({
  1: 'average',
  2: 'sum',
  3: 'last',
  4: 'max',
  5: 'min',
  6: 'avg_zero',
  7: 'absmax',
  8: 'absmin'
})
aggregationMethodToType = dict([[v, k] for k, v in aggregationTypeToMethod.items()])
aggregationMethods = aggregationTypeToMethod.values()

# No-op placeholders; enableDebug() replaces these with real implementations.
debug = startBlock = endBlock = lambda *a, **k: None

# Seconds per unit; 'years' assumes a non-leap 365-day year.
UnitMultipliers = {
  'seconds': 1,
  'minutes': 60,
  'hours': 3600,
  'days': 86400,
  'weeks': 86400 * 7,
  'years': 86400 * 365
}
def getUnitString(s):
  """Expand a unit-name prefix (e.g. 's', 'min', 'h') to its full name.

  The first unit (in seconds..years order) whose name starts with *s* wins;
  an unrecognized prefix raises ValueError.
  """
  units = ('seconds', 'minutes', 'hours', 'days', 'weeks', 'years')
  match = next((unit for unit in units if unit.startswith(s)), None)
  if match is None:
    raise ValueError("Invalid unit '%s'" % s)
  return match
def parseRetentionDef(retentionDef):
  """Parse a 'precision:points' retention string into (precision, points).

  Both halves may be a bare integer or an integer with a unit suffix,
  e.g. '60:1440' or '1m:1d'. Precision is returned in seconds; a
  unit-suffixed points value is converted to a point count by dividing the
  duration by the precision.
  """
  try:
    (precision, points) = retentionDef.strip().split(':', 1)
  except ValueError:
    raise ValueError("Invalid retention definition '%s'" % retentionDef)

  spec_re = re.compile(r'^(\d+)([a-z]+)$')

  if precision.isdigit():
    precision = int(precision) * UnitMultipliers[getUnitString('s')]
  else:
    match = spec_re.match(precision)
    if not match:
      raise ValueError("Invalid precision specification '%s'" % precision)
    precision = int(match.group(1)) * UnitMultipliers[getUnitString(match.group(2))]

  if points.isdigit():
    points = int(points)
  else:
    match = spec_re.match(points)
    if not match:
      raise ValueError("Invalid retention specification '%s'" % points)
    points = int(match.group(1)) * UnitMultipliers[getUnitString(match.group(2))] // precision

  return (precision, points)
# Exception hierarchy: every whisper-specific error derives from
# WhisperException so callers can catch the base class alone.
class WhisperException(Exception):
  """Base class for whisper exceptions."""


class InvalidConfiguration(WhisperException):
  """Invalid configuration."""


class InvalidAggregationMethod(WhisperException):
  """Invalid aggregation method."""


class InvalidTimeInterval(WhisperException):
  """Invalid time interval."""


class InvalidXFilesFactor(WhisperException):
  """Invalid xFilesFactor."""


class TimestampNotCovered(WhisperException):
  """Timestamp not covered by any archives in this database."""
class CorruptWhisperFile(WhisperException):
    """Raised when a whisper file's header or datapoints cannot be parsed.

    Carries a description of what failed and the path of the offending file.
    """
    def __init__(self, error, path):
        Exception.__init__(self, error)
        self.error = error  # human-readable description of the parse failure
        self.path = path  # filesystem path of the corrupt file
    def __repr__(self):
        return "<CorruptWhisperFile[%s] %s>" % (self.path, self.error)
    def __str__(self):
        return "%s (%s)" % (self.error, self.path)
def disableDebug():
    """Disable writing IO statistics to stdout.

    Restores the saved built-in ``open`` if enableDebug() previously
    wrapped it; otherwise does nothing.
    """
    global open
    if '_open' in globals():
        open = _open
def enableDebug():
    """Enable writing IO statistics to stdout.

    Replaces the module-global ``open`` with a wrapper that counts reads
    and writes, and installs real ``debug``/``startBlock``/``endBlock``
    hooks in place of the module-level no-ops. Undone by disableDebug().
    """
    global open, _open, debug, startBlock, endBlock
    _open = open  # remember the real open so disableDebug() can restore it
    class open(object):
        # File wrapper that reports every read/write via debug().
        def __init__(self, *args, **kwargs):
            self.f = _open(*args, **kwargs)
            self.writeCount = 0  # write() calls made on this handle
            self.readCount = 0  # read() calls made on this handle
        def __enter__(self):
            return self
        def __exit__(self, *args):
            self.f.close()
        def write(self, data):
            self.writeCount += 1
            debug('WRITE %d bytes #%d' % (len(data), self.writeCount))
            return self.f.write(data)
        def read(self, size):
            self.readCount += 1
            debug('READ %d bytes #%d' % (size, self.readCount))
            return self.f.read(size)
        def __getattr__(self, attr):
            # Delegate everything else to the wrapped file object.
            return getattr(self.f, attr)
    def debug(message):
        print('DEBUG :: %s' % message)
    # name -> start time; shared by the closures below (local to enableDebug).
    __timingBlocks = {}
    def startBlock(name):
        __timingBlocks[name] = time.time()
    def endBlock(name):
        debug("%s took %.5f seconds" % (name, time.time() - __timingBlocks.pop(name)))
def __readHeader(fh):
    """Parse the whisper header from an open file handle.

    Returns a dict with 'aggregationMethod', 'maxRetention',
    'xFilesFactor' and the list of archive descriptors. The handle's file
    position is restored afterwards. Raises CorruptWhisperFile when the
    header cannot be parsed. Results are memoized in __headerCache when
    CACHE_HEADERS is set.
    """
    if CACHE_HEADERS:
        info = __headerCache.get(fh.name)
        if info:
            return info
    originalOffset = fh.tell()
    fh.seek(0)
    packedMetadata = fh.read(metadataSize)
    try:
        (aggregationType, maxRetention, xff, archiveCount) \
            = struct.unpack(metadataFormat, packedMetadata)
    except (struct.error, ValueError, TypeError):
        raise CorruptWhisperFile("Unable to read header", fh.name)
    # Sanity-check the aggregation type and xFilesFactor before trusting
    # archiveCount below.
    try:
        aggregationTypeToMethod[aggregationType]
    except KeyError:
        raise CorruptWhisperFile("Unable to read header", fh.name)
    if not 0 <= xff <= 1:
        raise CorruptWhisperFile("Unable to read header", fh.name)
    archives = []
    for i in xrange(archiveCount):
        packedArchiveInfo = fh.read(archiveInfoSize)
        try:
            (offset, secondsPerPoint, points) = struct.unpack(archiveInfoFormat, packedArchiveInfo)
        except (struct.error, ValueError, TypeError):
            raise CorruptWhisperFile("Unable to read archive%d metadata" % i, fh.name)
        archiveInfo = {
            'offset': offset,
            'secondsPerPoint': secondsPerPoint,
            'points': points,
            'retention': secondsPerPoint * points,  # seconds of history held
            'size': points * pointSize,  # bytes occupied by the datapoints
        }
        archives.append(archiveInfo)
    fh.seek(originalOffset)
    info = {
        'aggregationMethod': aggregationTypeToMethod.get(aggregationType, 'average'),
        'maxRetention': maxRetention,
        'xFilesFactor': xff,
        'archives': archives,
    }
    if CACHE_HEADERS:
        __headerCache[fh.name] = info
    return info
def setXFilesFactor(path, xFilesFactor):
    """Set the xFilesFactor for the whisper file at ``path``.

    ``xFilesFactor`` is a float between 0 and 1. Returns the previous
    xFilesFactor.
    """
    return __setAggregation(path, xFilesFactor=xFilesFactor)[1]
def setAggregationMethod(path, aggregationMethod, xFilesFactor=None):
    """Set the aggregationMethod for the whisper file at ``path``.

    ``aggregationMethod`` must be one of ``whisper.aggregationMethods``.
    ``xFilesFactor``, when given, also replaces the stored xFilesFactor;
    ``None`` leaves it untouched. Returns the previous aggregationMethod.
    """
    return __setAggregation(path, aggregationMethod, xFilesFactor)[0]
def __setAggregation(path, aggregationMethod=None, xFilesFactor=None):
    """Rewrite the header of ``path`` with a new aggregationMethod and/or
    xFilesFactor; a None argument keeps the stored value.

    Returns (old aggregationMethod, old xFilesFactor).
    """
    with open(path, 'r+b', BUFFERING) as fh:
        if LOCK:
            # Serialize header rewrites against concurrent writers.
            fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
        info = __readHeader(fh)
        if xFilesFactor is None:
            xFilesFactor = info['xFilesFactor']
        if aggregationMethod is None:
            aggregationMethod = info['aggregationMethod']
        __writeHeaderMetadata(fh, aggregationMethod, info['maxRetention'],
                              xFilesFactor, len(info['archives']))
        if AUTOFLUSH:
            fh.flush()
            os.fsync(fh.fileno())
        # The cached header is now stale; drop it.
        if CACHE_HEADERS and fh.name in __headerCache:
            del __headerCache[fh.name]
        return (info['aggregationMethod'], info['xFilesFactor'])
def __writeHeaderMetadata(fh, aggregationMethod, maxRetention, xFilesFactor, archiveCount):
    """Validate the metadata fields and write them at the start of ``fh``."""
    try:
        aggregationType = aggregationMethodToType[aggregationMethod]
    except KeyError:
        raise InvalidAggregationMethod("Unrecognized aggregation method: %s" %
                                       aggregationMethod)
    try:
        xFilesFactor = float(xFilesFactor)
    except ValueError:
        raise InvalidXFilesFactor("Invalid xFilesFactor %s, not a float" %
                                  xFilesFactor)
    if xFilesFactor < 0 or xFilesFactor > 1:
        raise InvalidXFilesFactor("Invalid xFilesFactor %s, not between 0 and 1" %
                                  xFilesFactor)
    # Pack the four header fields and write them at offset 0.
    packedMetadata = b"".join((
        struct.pack(longFormat, aggregationType),
        struct.pack(longFormat, maxRetention),
        struct.pack(floatFormat, xFilesFactor),
        struct.pack(longFormat, archiveCount),
    ))
    fh.seek(0)
    fh.write(packedMetadata)
def validateArchiveList(archiveList):
    """Validate an archiveList, sorting it in place by precision.

    An ArchiveList must:
    1. Have at least one archive config. Example: (60, 86400)
    2. No archive may be a duplicate of another.
    3. Higher precision archives' precision must evenly divide all lower
       precision archives' precision.
    4. Lower precision archives must cover larger time intervals than higher
       precision archives.
    5. Each archive must have at least enough points to consolidate to the
       next archive.

    Returns None on success; raises InvalidConfiguration on any violation.
    (The old docstring claimed a True/False return, which was never the case.)
    """
    if not archiveList:
        raise InvalidConfiguration("You must specify at least one archive configuration!")
    archiveList.sort(key=lambda a: a[0])  # sort by precision (secondsPerPoint)
    # Check each adjacent pair of archives, finest precision first.
    for i, (archive, nextArchive) in enumerate(zip(archiveList, archiveList[1:])):
        if not archive[0] < nextArchive[0]:
            raise InvalidConfiguration(
                "A Whisper database may not be configured having "
                "two archives with the same precision (archive%d: %s, archive%d: %s)" %
                (i, archive, i + 1, nextArchive))
        if nextArchive[0] % archive[0] != 0:
            raise InvalidConfiguration(
                "Higher precision archives' precision "
                "must evenly divide all lower precision archives' precision "
                "(archive%d: %s, archive%d: %s)" %
                (i, archive[0], i + 1, nextArchive[0]))
        retention = archive[0] * archive[1]
        nextRetention = nextArchive[0] * nextArchive[1]
        if not nextRetention > retention:
            raise InvalidConfiguration(
                "Lower precision archives must cover "
                "larger time intervals than higher precision archives "
                "(archive%d: %s seconds, archive%d: %s seconds)" %
                (i, retention, i + 1, nextRetention))
        archivePoints = archive[1]
        pointsPerConsolidation = nextArchive[0] // archive[0]
        if not archivePoints >= pointsPerConsolidation:
            raise InvalidConfiguration(
                "Each archive must have at least enough points "
                "to consolidate to the next archive (archive%d consolidates %d of "
                "archive%d's points but it has only %d total points)" %
                (i + 1, pointsPerConsolidation, i, archivePoints))
def create(path, archiveList, xFilesFactor=None, aggregationMethod=None,
           sparse=False, useFallocate=False):
    """create(path,archiveList,xFilesFactor=0.5,aggregationMethod='average')

    path is a string
    archiveList is a list of archives, each of which is of the form
    (secondsPerPoint, numberOfPoints)
    xFilesFactor specifies the fraction of data points in a propagation interval
    that must have known values for a propagation to occur
    aggregationMethod specifies the function to use when propagating data (see
    ``whisper.aggregationMethods``)
    sparse and useFallocate choose how the archive space is pre-allocated.
    Raises InvalidConfiguration if the archive list is invalid or the file
    already exists.
    """
    # Set default params
    if xFilesFactor is None:
        xFilesFactor = 0.5
    if aggregationMethod is None:
        aggregationMethod = 'average'
    # Validate archive configurations...
    validateArchiveList(archiveList)
    # Looks good, now we create the file and write the header
    if os.path.exists(path):
        raise InvalidConfiguration("File %s already exists!" % path)
    with open(path, 'wb', BUFFERING) as fh:
        try:
            if LOCK:
                fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
            if CAN_FADVISE and FADVISE_RANDOM:
                posix_fadvise(fh.fileno(), 0, 0, POSIX_FADV_RANDOM)
            # The longest retention among all archives becomes maxRetention.
            oldest = max([secondsPerPoint * points for secondsPerPoint, points in archiveList])
            __writeHeaderMetadata(fh, aggregationMethod, oldest, xFilesFactor,
                                  len(archiveList))
            headerSize = metadataSize + (archiveInfoSize * len(archiveList))
            archiveOffsetPointer = headerSize
            # Each archive's datapoint region starts where the previous ends.
            for secondsPerPoint, points in archiveList:
                archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
                fh.write(archiveInfo)
                archiveOffsetPointer += (points * pointSize)
            # If configured to use fallocate and capable of fallocate use that, else
            # attempt sparse if configure or zero pre-allocate if sparse isn't configured.
            if CAN_FALLOCATE and useFallocate:
                remaining = archiveOffsetPointer - headerSize
                fallocate(fh, headerSize, remaining)
            elif sparse:
                # Seek past the end and write one byte; the hole stays sparse.
                fh.seek(archiveOffsetPointer - 1)
                fh.write(b'\x00')
            else:
                # Zero-fill the whole datapoint region in fixed-size chunks.
                remaining = archiveOffsetPointer - headerSize
                chunksize = 16384
                zeroes = b'\x00' * chunksize
                while remaining > chunksize:
                    fh.write(zeroes)
                    remaining -= chunksize
                fh.write(zeroes[:remaining])
            if AUTOFLUSH:
                fh.flush()
                os.fsync(fh.fileno())
            # Explicitly close the file to catch IOError on close()
            fh.close()
        except IOError:
            # if we got an IOError above, the file is either empty or half created.
            # Better off deleting it to avoid surprises later
            os.unlink(fh.name)
            raise
def aggregate(aggregationMethod, knownValues, neighborValues=None):
    """Consolidate ``knownValues`` with the named aggregation method.

    'avg_zero' is special: it averages over ``neighborValues`` with unknown
    (None) slots counted as zero, and requires neighborValues to be given.
    Raises InvalidAggregationMethod for unknown method names.
    """
    if aggregationMethod == 'avg_zero':
        if not neighborValues:
            raise InvalidAggregationMethod("Using avg_zero without neighborValues")
        zeroed = [v or 0 for v in neighborValues]
        return float(sum(zeroed)) / float(len(zeroed))
    dispatch = {
        'average': lambda vals: float(sum(vals)) / float(len(vals)),
        'sum': lambda vals: float(sum(vals)),
        'last': lambda vals: vals[-1],
        'max': max,
        'min': min,
        'absmax': lambda vals: max(vals, key=abs),
        'absmin': lambda vals: min(vals, key=abs),
    }
    func = dispatch.get(aggregationMethod)
    if func is None:
        raise InvalidAggregationMethod(
            "Unrecognized aggregation method %s" % aggregationMethod)
    return func(knownValues)
def __propagate(fh, header, timestamp, higher, lower):
    """Aggregate the higher-precision points that cover ``timestamp`` into
    the lower-precision archive.

    Returns True when a value was written to ``lower`` (i.e. the fraction
    of known points met xFilesFactor), False otherwise.
    """
    aggregationMethod = header['aggregationMethod']
    xff = header['xFilesFactor']
    # Align the timestamp to the lower archive's interval grid.
    lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint'])
    fh.seek(higher['offset'])
    packedPoint = fh.read(pointSize)
    try:
        (higherBaseInterval, higherBaseValue) = struct.unpack(pointFormat, packedPoint)
    except struct.error:
        raise CorruptWhisperFile("Unable to read base datapoint", fh.name)
    if higherBaseInterval == 0:
        # Empty archive: read from its beginning.
        higherFirstOffset = higher['offset']
    else:
        # Locate the slot for lowerIntervalStart relative to the base point,
        # wrapping around the circular archive.
        timeDistance = lowerIntervalStart - higherBaseInterval
        pointDistance = timeDistance // higher['secondsPerPoint']
        byteDistance = pointDistance * pointSize
        higherFirstOffset = higher['offset'] + (byteDistance % higher['size'])
    # One lower-archive point is consolidated from this many higher points.
    higherPoints = lower['secondsPerPoint'] // higher['secondsPerPoint']
    higherSize = higherPoints * pointSize
    relativeFirstOffset = higherFirstOffset - higher['offset']
    relativeLastOffset = (relativeFirstOffset + higherSize) % higher['size']
    higherLastOffset = relativeLastOffset + higher['offset']
    fh.seek(higherFirstOffset)
    if higherFirstOffset < higherLastOffset:  # We don't wrap the archive
        seriesString = fh.read(higherLastOffset - higherFirstOffset)
    else:  # We do wrap the archive
        higherEnd = higher['offset'] + higher['size']
        seriesString = fh.read(higherEnd - higherFirstOffset)
        fh.seek(higher['offset'])
        seriesString += fh.read(higherLastOffset - higher['offset'])
    # Now we unpack the series data we just read
    byteOrder, pointTypes = pointFormat[0], pointFormat[1:]
    points = len(seriesString) // pointSize
    seriesFormat = byteOrder + (pointTypes * points)
    try:
        unpackedSeries = struct.unpack(seriesFormat, seriesString)
    except struct.error:
        raise CorruptWhisperFile("Unable to read datapoints", fh.name)
    # And finally we construct a list of values
    neighborValues = [None] * points
    currentInterval = lowerIntervalStart
    step = higher['secondsPerPoint']
    for i in xrange(0, len(unpackedSeries), 2):
        pointTime = unpackedSeries[i]
        # Slots whose stored timestamp doesn't match are stale data from a
        # previous wrap of the circular buffer; leave them as None.
        if pointTime == currentInterval:
            neighborValues[i // 2] = unpackedSeries[i + 1]
        currentInterval += step
    # Propagate aggregateValue to propagate from neighborValues if we have enough known points
    knownValues = [v for v in neighborValues if v is not None]
    if not knownValues:
        return False
    knownPercent = float(len(knownValues)) / float(len(neighborValues))
    if knownPercent >= xff:  # We have enough data to propagate a value!
        aggregateValue = aggregate(aggregationMethod, knownValues, neighborValues)
        myPackedPoint = struct.pack(pointFormat, lowerIntervalStart, aggregateValue)
        fh.seek(lower['offset'])
        packedPoint = fh.read(pointSize)
        try:
            (lowerBaseInterval, lowerBaseValue) = struct.unpack(pointFormat, packedPoint)
        except struct.error:
            raise CorruptWhisperFile("Unable to read base datapoint", fh.name)
        if lowerBaseInterval == 0:  # First propagated update to this lower archive
            fh.seek(lower['offset'])
            fh.write(myPackedPoint)
        else:  # Not our first propagated update to this lower archive
            timeDistance = lowerIntervalStart - lowerBaseInterval
            pointDistance = timeDistance // lower['secondsPerPoint']
            byteDistance = pointDistance * pointSize
            lowerOffset = lower['offset'] + (byteDistance % lower['size'])
            fh.seek(lowerOffset)
            fh.write(myPackedPoint)
        return True
    else:
        return False
def update(path, value, timestamp=None, now=None):
    """Store one datapoint in the whisper file at ``path``.

    ``value`` is coerced to float; ``timestamp`` is an epoch time and
    defaults to now.
    """
    datapoint = float(value)  # fail fast before opening the file
    with open(path, 'r+b', BUFFERING) as fh:
        if CAN_FADVISE and FADVISE_RANDOM:
            # Hint the kernel that our access pattern is random.
            posix_fadvise(fh.fileno(), 0, 0, POSIX_FADV_RANDOM)
        return file_update(fh, datapoint, timestamp, now)
def file_update(fh, value, timestamp, now=None):
    """Write a single datapoint to an open whisper file handle.

    Stores ``value`` at ``timestamp`` in the highest-precision archive that
    covers it, then propagates into lower-precision archives. Raises
    TimestampNotCovered when the timestamp is beyond maxRetention or in
    the future.
    """
    if LOCK:
        fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
    header = __readHeader(fh)
    if now is None:
        now = int(time.time())
    if timestamp is None:
        timestamp = now
    timestamp = int(timestamp)
    diff = now - timestamp
    if not ((diff < header['maxRetention']) and diff >= 0):
        raise TimestampNotCovered(
            "Timestamp not covered by any archives in this database.")
    # Find the highest-precision archive that covers timestamp
    for i, archive in enumerate(header['archives']):
        if archive['retention'] < diff:
            continue
        # We'll pass on the update to these lower precision archives later
        lowerArchives = header['archives'][i + 1:]
        break
    # First we update the highest-precision archive
    myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
    myPackedPoint = struct.pack(pointFormat, myInterval, value)
    fh.seek(archive['offset'])
    packedPoint = fh.read(pointSize)
    try:
        (baseInterval, baseValue) = struct.unpack(pointFormat, packedPoint)
    except struct.error:
        raise CorruptWhisperFile("Unable to read base datapoint", fh.name)
    if baseInterval == 0:  # This file's first update
        fh.seek(archive['offset'])
        fh.write(myPackedPoint)
        baseInterval = myInterval
    else:  # Not our first update
        # Our slot is at a fixed distance from the base point, modulo the
        # circular archive size.
        timeDistance = myInterval - baseInterval
        pointDistance = timeDistance // archive['secondsPerPoint']
        byteDistance = pointDistance * pointSize
        myOffset = archive['offset'] + (byteDistance % archive['size'])
        fh.seek(myOffset)
        fh.write(myPackedPoint)
    # Now we propagate the update to lower-precision archives
    higher = archive
    for lower in lowerArchives:
        # Stop as soon as one level fails to meet xFilesFactor.
        if not __propagate(fh, header, myInterval, higher, lower):
            break
        higher = lower
    if AUTOFLUSH:
        fh.flush()
        os.fsync(fh.fileno())
def update_many(path, points, now=None):
    """Store multiple datapoints in the whisper file at ``path``.

    ``points`` is a list of (timestamp, value) pairs; an empty list is a
    no-op.
    """
    if not points:
        return
    normalized = [(int(stamp), float(val)) for (stamp, val) in points]
    # Order points by timestamp, newest first.
    normalized.sort(key=lambda p: p[0], reverse=True)
    with open(path, 'r+b', BUFFERING) as fh:
        if CAN_FADVISE and FADVISE_RANDOM:
            posix_fadvise(fh.fileno(), 0, 0, POSIX_FADV_RANDOM)
        return file_update_many(fh, normalized, now)
def file_update_many(fh, points, now=None):
    """Write multiple datapoints (sorted newest first) to an open handle.

    Each point is routed to the highest-precision archive whose retention
    covers its age; points older than every archive's retention are
    dropped.
    """
    if LOCK:
        fcntl.flock(fh.fileno(), fcntl.LOCK_EX)
    header = __readHeader(fh)
    if now is None:
        now = int(time.time())
    archives = iter(header['archives'])
    currentArchive = next(archives)
    currentPoints = []  # batch of points destined for currentArchive
    for point in points:
        age = now - point[0]
        while currentArchive['retention'] < age:  # We can't fit any more points in this archive
            if currentPoints:  # Commit all the points we've found that it can fit
                currentPoints.reverse()  # Put points in chronological order
                __archive_update_many(fh, header, currentArchive, currentPoints)
                currentPoints = []
            try:
                currentArchive = next(archives)
            except StopIteration:
                currentArchive = None
                break
        if not currentArchive:
            break  # Drop remaining points that don't fit in the database
        currentPoints.append(point)
    # Don't forget to commit after we've checked all the archives
    if currentArchive and currentPoints:
        currentPoints.reverse()
        __archive_update_many(fh, header, currentArchive, currentPoints)
    if AUTOFLUSH:
        fh.flush()
        os.fsync(fh.fileno())
def __archive_update_many(fh, header, archive, points):
    """Write a batch of chronologically-ordered points into one archive,
    then propagate every touched interval to all lower-precision archives.
    """
    step = archive['secondsPerPoint']
    # Align each timestamp to this archive's interval grid.
    alignedPoints = [(timestamp - (timestamp % step), value)
                     for (timestamp, value) in points]
    # Create a packed string for each contiguous sequence of points
    packedStrings = []
    previousInterval = None
    currentString = b""
    lenAlignedPoints = len(alignedPoints)
    for i in xrange(0, lenAlignedPoints):
        # Take last point in run of points with duplicate intervals
        if i + 1 < lenAlignedPoints and alignedPoints[i][0] == alignedPoints[i + 1][0]:
            continue
        (interval, value) = alignedPoints[i]
        if (not previousInterval) or (interval == previousInterval + step):
            # Extends the current contiguous run.
            currentString += struct.pack(pointFormat, interval, value)
            previousInterval = interval
        else:
            # Gap in the series: flush the finished run, start a new one.
            numberOfPoints = len(currentString) // pointSize
            startInterval = previousInterval - (step * (numberOfPoints - 1))
            packedStrings.append((startInterval, currentString))
            currentString = struct.pack(pointFormat, interval, value)
            previousInterval = interval
    if currentString:
        numberOfPoints = len(currentString) // pointSize
        startInterval = previousInterval - (step * (numberOfPoints - 1))
        packedStrings.append((startInterval, currentString))
    # Read base point and determine where our writes will start
    fh.seek(archive['offset'])
    packedBasePoint = fh.read(pointSize)
    try:
        (baseInterval, baseValue) = struct.unpack(pointFormat, packedBasePoint)
    except struct.error:
        raise CorruptWhisperFile("Unable to read base datapoint", fh.name)
    if baseInterval == 0:  # This file's first update
        # Use our first string as the base, so we start at the start
        baseInterval = packedStrings[0][0]
    # Write all of our packed strings in locations determined by the baseInterval
    for (interval, packedString) in packedStrings:
        timeDistance = interval - baseInterval
        pointDistance = timeDistance // step
        byteDistance = pointDistance * pointSize
        myOffset = archive['offset'] + (byteDistance % archive['size'])
        fh.seek(myOffset)
        archiveEnd = archive['offset'] + archive['size']
        bytesBeyond = (myOffset + len(packedString)) - archiveEnd
        if bytesBeyond > 0:
            # The run extends past the end of the circular archive: write the
            # tail at the end and wrap the rest to the beginning.
            fh.write(packedString[:-bytesBeyond])
            assert fh.tell() == archiveEnd, (
                "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" %
                (archiveEnd, fh.tell(), bytesBeyond, len(packedString))
            )
            fh.seek(archive['offset'])
            # Safe because it can't exceed the archive (retention checking logic above)
            fh.write(packedString[-bytesBeyond:])
        else:
            fh.write(packedString)
    # Now we propagate the updates to lower-precision archives
    higher = archive
    lowerArchives = [arc for arc in header['archives']
                     if arc['secondsPerPoint'] > archive['secondsPerPoint']]
    for lower in lowerArchives:
        # Align each updated interval to the lower archive's grid.
        def fit(i):
            return i - (i % lower['secondsPerPoint'])
        lowerIntervals = [fit(p[0]) for p in alignedPoints]
        uniqueLowerIntervals = set(lowerIntervals)
        propagateFurther = False
        for interval in uniqueLowerIntervals:
            if __propagate(fh, header, interval, higher, lower):
                propagateFurther = True
        # Stop once no interval met xFilesFactor at this level.
        if not propagateFurther:
            break
        higher = lower
def info(path):
    """Return the header dict for the whisper file at ``path``.

    Returns None when the file cannot be opened or read.
    """
    try:
        with open(path, 'rb') as fh:
            return __readHeader(fh)
    except (IOError, OSError):
        return None
def fetch(path, fromTime, untilTime=None, now=None, archiveToSelect=None):
    """Fetch datapoints from the whisper file at ``path``.

    ``fromTime``/``untilTime`` are epoch times (untilTime defaults to now);
    ``archiveToSelect`` optionally names the granularity to read from.
    Returns (timeInfo, valueList) where timeInfo is (fromTime, untilTime,
    step), or None if no data can be returned.
    """
    with open(path, 'rb') as fh:
        return file_fetch(fh, fromTime, untilTime, now, archiveToSelect)
def file_fetch(fh, fromTime, untilTime, now=None, archiveToSelect=None):
    """Fetch (timeInfo, valueList) from an open whisper file handle.

    Clamps the requested range to what the database retains and returns
    None when the range is entirely in the future or beyond retention.
    Raises InvalidTimeInterval when fromTime > untilTime and ValueError
    when archiveToSelect names a granularity the file does not have.
    """
    header = __readHeader(fh)
    if now is None:
        now = int(time.time())
    if untilTime is None:
        untilTime = now
    fromTime = int(fromTime)
    untilTime = int(untilTime)
    # Here we try and be flexible and return as much data as we can.
    # If the range of data is from too far in the past or fully in the future, we
    # return nothing
    if fromTime > untilTime:
        raise InvalidTimeInterval(
            "Invalid time interval: from time '%s' is after until time '%s'" %
            (fromTime, untilTime))
    oldestTime = now - header['maxRetention']
    # Range is in the future
    if fromTime > now:
        return None
    # Range is beyond retention
    if untilTime < oldestTime:
        return None
    # Range requested is partially beyond retention, adjust
    if fromTime < oldestTime:
        fromTime = oldestTime
    # Range is partially in the future, adjust
    if untilTime > now:
        untilTime = now
    diff = now - fromTime
    # Parse granularity if requested
    if archiveToSelect:
        retentionStr = str(archiveToSelect) + ":1"
        archiveToSelect = parseRetentionDef(retentionStr)[0]
    # Pick the requested archive, or the finest one covering the range.
    for archive in header['archives']:
        if archiveToSelect:
            if archive['secondsPerPoint'] == archiveToSelect:
                break
            archive = None
        else:
            if archive['retention'] >= diff:
                break
    if archiveToSelect and not archive:
        raise ValueError("Invalid granularity: %s" % (archiveToSelect))
    return __archive_fetch(fh, archive, fromTime, untilTime)
def __archive_fetch(fh, archive, fromTime, untilTime):
    """
    Fetch data from a single archive. Note that checks for validity of the time
    period requested happen above this level so it's possible to wrap around the
    archive on a read and request data older than the archive's retention

    Returns (timeInfo, valueList) with timeInfo = (fromInterval,
    untilInterval, step); unknown slots are None.
    """
    step = archive['secondsPerPoint']
    # Align both bounds to the interval grid, skewed one step forward.
    fromInterval = int(fromTime - (fromTime % step)) + step
    untilInterval = int(untilTime - (untilTime % step)) + step
    if fromInterval == untilInterval:
        # Zero-length time range: always include the next point
        untilInterval += step
    fh.seek(archive['offset'])
    packedPoint = fh.read(pointSize)
    try:
        (baseInterval, baseValue) = struct.unpack(pointFormat, packedPoint)
    except struct.error:
        raise CorruptWhisperFile("Unable to read base datapoint", fh.name)
    if baseInterval == 0:
        # Empty archive: return all-None values without reading data.
        points = (untilInterval - fromInterval) // step
        timeInfo = (fromInterval, untilInterval, step)
        valueList = [None] * points
        return (timeInfo, valueList)
    # Determine fromOffset
    timeDistance = fromInterval - baseInterval
    pointDistance = timeDistance // step
    byteDistance = pointDistance * pointSize
    fromOffset = archive['offset'] + (byteDistance % archive['size'])
    # Determine untilOffset
    timeDistance = untilInterval - baseInterval
    pointDistance = timeDistance // step
    byteDistance = pointDistance * pointSize
    untilOffset = archive['offset'] + (byteDistance % archive['size'])
    # Read all the points in the interval
    fh.seek(fromOffset)
    if fromOffset < untilOffset:  # If we don't wrap around the archive
        seriesString = fh.read(untilOffset - fromOffset)
    else:  # We do wrap around the archive, so we need two reads
        archiveEnd = archive['offset'] + archive['size']
        seriesString = fh.read(archiveEnd - fromOffset)
        fh.seek(archive['offset'])
        seriesString += fh.read(untilOffset - archive['offset'])
    # Now we unpack the series data we just read (anything faster than unpack?)
    byteOrder, pointTypes = pointFormat[0], pointFormat[1:]
    points = len(seriesString) // pointSize
    seriesFormat = byteOrder + (pointTypes * points)
    try:
        unpackedSeries = struct.unpack(seriesFormat, seriesString)
    except struct.error:
        raise CorruptWhisperFile("Unable to read datapoints", fh.name)
    # And finally we construct a list of values (optimize this!)
    valueList = [None] * points  # Pre-allocate entire list for speed
    currentInterval = fromInterval
    for i in xrange(0, len(unpackedSeries), 2):
        pointTime = unpackedSeries[i]
        # Slots with a different stored timestamp are stale (overwritten on a
        # previous wrap); leave them as None.
        if pointTime == currentInterval:
            pointValue = unpackedSeries[i + 1]
            valueList[i // 2] = pointValue  # In-place reassignment is faster than append()
        currentInterval += step
    timeInfo = (fromInterval, untilInterval, step)
    return (timeInfo, valueList)
def merge(path_from, path_to, time_from=None, time_to=None, now=None):
    """Merge the data from one whisper file into another.

    Both files must share the same archive configuration; time_from and
    time_to optionally bound the merge.
    """
    # Nested with-blocks kept for compatibility with very old interpreters.
    with open(path_from, 'rb') as fh_from:
        with open(path_to, 'rb+') as fh_to:
            return file_merge(fh_from, fh_to, time_from, time_to, now)
def file_merge(fh_from, fh_to, time_from=None, time_to=None, now=None):
    """Merge datapoints from one open whisper file into another.

    Both files must have identical archive configurations; only known
    points within [time_from, time_to] (defaulting to all retained data up
    to now) are copied.
    """
    headerFrom = __readHeader(fh_from)
    headerTo = __readHeader(fh_to)
    if headerFrom['archives'] != headerTo['archives']:
        raise NotImplementedError(
            "%s and %s archive configurations are unalike. "
            "Resize the input before merging" % (fh_from.name, fh_to.name))
    if now is None:
        now = int(time.time())
    if (time_to is not None):
        untilTime = time_to
    else:
        untilTime = now
    if (time_from is not None):
        fromTime = time_from
    else:
        fromTime = 0
    # Sanity check: do not mix the from/to values.
    if untilTime < fromTime:
        raise ValueError("time_to must be >= time_from")
    archives = headerFrom['archives']
    archives.sort(key=operator.itemgetter('retention'))
    for archive in archives:
        archiveFrom = fromTime
        archiveTo = untilTime
        # Clamp the start to what this archive actually retains.
        if archiveFrom < now - archive['retention']:
            archiveFrom = now - archive['retention']
        # if untilTime is too old, skip this archive
        if archiveTo < now - archive['retention']:
            continue
        (timeInfo, values) = __archive_fetch(fh_from, archive, archiveFrom, archiveTo)
        (start, end, archive_step) = timeInfo
        # Pair each value with its timestamp and drop the unknown (None) ones.
        pointsToWrite = list(ifilter(
            lambda points: points[1] is not None,
            izip(xrange(start, end, archive_step), values)))
        # skip if there are no points to write
        if len(pointsToWrite) == 0:
            continue
        __archive_update_many(fh_to, headerTo, archive, pointsToWrite)
def diff(path_from, path_to, ignore_empty=False, until_time=None, now=None):
    """Compare two whisper databases with identical archive configurations.

    Returns the per-archive diff list produced by file_diff.
    """
    with open(path_from, 'rb') as fh_from:
        with open(path_to, 'rb') as fh_to:
            return file_diff(fh_from, fh_to, ignore_empty, until_time, now)
def file_diff(fh_from, fh_to, ignore_empty=False, until_time=None, now=None):
    """Compare two open whisper files archive by archive.

    Both files must share the same archive configuration. Returns a list of
    (archive_number, diffs, points_considered) tuples, where diffs is a
    list of (timestamp, value_from, value_to) points whose values differ.
    With ignore_empty, points missing on either side are excluded;
    otherwise points missing on both sides are excluded.
    """
    headerFrom = __readHeader(fh_from)
    headerTo = __readHeader(fh_to)
    if headerFrom['archives'] != headerTo['archives']:
        # TODO: Add specific whisper-resize commands to right size things
        raise NotImplementedError(
            "%s and %s archive configurations are unalike. "
            "Resize the input before diffing" % (fh_from.name, fh_to.name))
    archives = headerFrom['archives']
    archives.sort(key=operator.itemgetter('retention'))
    archive_diffs = []
    if now is None:
        now = int(time.time())
    if until_time:
        untilTime = until_time
    else:
        untilTime = now
    for archive_number, archive in enumerate(archives):
        startTime = now - archive['retention']
        (fromTimeInfo, fromValues) = \
            __archive_fetch(fh_from, archive, startTime, untilTime)
        (toTimeInfo, toValues) = __archive_fetch(fh_to, archive, startTime, untilTime)
        (start, end, archive_step) = \
            (min(fromTimeInfo[0], toTimeInfo[0]),
             max(fromTimeInfo[1], toTimeInfo[1]),
             min(fromTimeInfo[2], toTimeInfo[2]))
        # map+lambda replaced with an equivalent comprehension.
        points = [(s * archive_step + start, fromValues[s], toValues[s])
                  for s in xrange(0, (end - start) // archive_step)]
        if ignore_empty:
            points = [p for p in points if p[1] is not None and p[2] is not None]
        else:
            points = [p for p in points if p[1] is not None or p[2] is not None]
        diffs = [p for p in points if p[1] != p[2]]
        # len(points) instead of the non-idiomatic points.__len__();
        # the dead "diffs = []" pre-initialization is gone.
        archive_diffs.append((archive_number, diffs, len(points)))
        # Don't re-examine the range already covered by a finer archive.
        untilTime = min(startTime, untilTime)
    return archive_diffs
|
|
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__author__ = "Microsoft Corporation <ptvshelp@microsoft.com>"
__version__ = "3.0.0.0"
import os.path
import subprocess
import shutil
import sys
import tarfile
import tempfile
# (major, minor) of the running interpreter; drives the install strategy.
MAJOR_VERSION = sys.version_info[:2]
# Base command for re-invoking this interpreter in subprocesses.
EXECUTABLE = [sys.executable]
if sys.platform == 'cli' and hasattr(sys, '_getframe'):
    # IronPython with frames enabled: keep -X:Frames for child processes too
    # (see _restart_with_x_frames below).
    EXECUTABLE.append('-X:Frames')
# Python 3.1 is special-cased: define a minimal urlretrieve emulation on top
# of urlopen that mimics the stdlib (filename, headers) return shape.
if MAJOR_VERSION == (3, 1):
    from urllib.request import urlopen
    def urlretrieve(url, filename):
        # Download url into filename; headers are not provided (None).
        fobj = None
        uobj = urlopen(url)
        try:
            fobj = open(filename, 'wb')
            fobj.write(uobj.readall())
        finally:
            uobj.close()
            if fobj:
                fobj.close()
        return filename, None
else:
    try:
        from urllib.request import urlretrieve
    except ImportError:
        # Python 2 fallback.
        from urllib import urlretrieve
def _download_and_install(source, name, temp_dir):
    """Download the *name* source tarball from *source* into *temp_dir*,
    extract it, and run its setup.py install (leaves cwd in the extracted
    directory)."""
    os.chdir(temp_dir)
    print('Downloading %s from %s' % (name, source))
    sys.stdout.flush()
    package_path, _ = urlretrieve(source, name + '.tar.gz')
    package = tarfile.open(package_path)
    try:
        # Guard against path traversal inside the archive.
        safe_members = [m for m in package.getmembers()
                        if not m.name.startswith(('..', '\\'))]
        package.extractall(temp_dir, members=safe_members)
    finally:
        package.close()
    # The tarball unpacks to a directory containing setup.py; listdir names
    # are relative to temp_dir, which is the cwd here.
    extracted_dirs = [d for d in os.listdir(temp_dir)
                      if os.path.exists(os.path.join(d, 'setup.py'))]
    if not extracted_dirs:
        raise OSError("Failed to find %s's setup.py" % name)
    extracted_dir = extracted_dirs[0]
    print('\nInstalling from ' + extracted_dir)
    sys.stdout.flush()
    os.chdir(extracted_dir)
    subprocess.check_call(
        EXECUTABLE + ['setup.py', 'install', '--single-version-externally-managed',
                      '--record', name + '.txt']
    )

def install_from_source(setuptools_source, pip_source):
    """Install setuptools then pip from the given source-tarball URLs.

    Works in temporary directories that are removed afterwards; the
    original working directory is restored on exit. (The previously
    duplicated download/extract/install logic for the two packages now
    lives in _download_and_install.)
    """
    setuptools_temp_dir = tempfile.mkdtemp('-setuptools', 'ptvs-')
    pip_temp_dir = tempfile.mkdtemp('-pip', 'ptvs-')
    cwd = os.getcwd()
    try:
        _download_and_install(setuptools_source, 'setuptools', setuptools_temp_dir)
        _download_and_install(pip_source, 'pip', pip_temp_dir)
        print('\nInstallation Complete')
        sys.stdout.flush()
    finally:
        os.chdir(cwd)
        shutil.rmtree(setuptools_temp_dir, ignore_errors=True)
        shutil.rmtree(pip_temp_dir, ignore_errors=True)
def install_from_pip(getpip_url):
    """Bootstrap pip by downloading and running get-pip.py from getpip_url."""
    pip_temp_dir = tempfile.mkdtemp('-pip', 'ptvs-')
    try:
        print('Downloading pip from ' + getpip_url)
        sys.stdout.flush()
        target = os.path.join(pip_temp_dir, 'get-pip.py')
        pip_script, _ = urlretrieve(getpip_url, target)
        print('\nInstalling from ' + pip_script)
        sys.stdout.flush()
        subprocess.check_call(EXECUTABLE + [pip_script])
        print('\nInstallation Complete')
        sys.stdout.flush()
    finally:
        # Always clean up the download directory.
        shutil.rmtree(pip_temp_dir, ignore_errors=True)
def install_from_ensurepip(ensurepip):
    """Install pip via the stdlib ensurepip module, then upgrade to latest.

    ensurepip only installs the wheels bundled with the interpreter, which may
    be stale, so a follow-up ``pip install -U`` brings pip, setuptools and
    wheel up to date.
    """
    print('Installing with ensurepip')
    sys.stdout.flush()
    # Bootstrapping gives us *a* pip; upgrading afterwards gives us the latest.
    ensurepip.bootstrap(upgrade=True, default_pip=True)
    upgrade_cmd = EXECUTABLE + ["-m", "pip", "install", "-U", "pip", "setuptools", "wheel"]
    subprocess.check_call(upgrade_cmd)
    print('\nInstallation Complete')
    sys.stdout.flush()
def main():
    """Pick the best available strategy for installing pip on this interpreter.

    Order of preference: ensurepip (stdlib), get-pip.py, then a direct source
    download of setuptools + pip. Very old / unsupported interpreter versions
    are rejected with a non-zero return value.
    """
    try:
        import ensurepip
    except ImportError:
        ensurepip = None
    if ensurepip is not None:
        try:
            install_from_ensurepip(ensurepip)
            return
        except Exception:
            if sys.platform == 'cli':
                # On IronPython a failed upgrade is expected; keep what we have.
                print('\nFailed to upgrade pip, which is probably because of IronPython. Leaving the earlier version.')
                return
            print("\nFailed to upgrade pip, which probably indicates that it isn't installed properly.")
    # Version gates: some interpreters are unsupported or need pinned sources.
    if MAJOR_VERSION < (2, 5):
        print('Python versions earlier than 2.5 are not supported by PTVS.')
        return -1
    if MAJOR_VERSION == (3, 0):
        print('Python 3.0 is not supported by pip and setuptools')
        return -2
    if MAJOR_VERSION == (2, 5):
        install_from_source(
            'http://go.microsoft.com/fwlink/?LinkId=317602',
            'http://go.microsoft.com/fwlink/?LinkId=313647',
        )
        return
    if MAJOR_VERSION == (3, 1):
        install_from_source(
            'http://go.microsoft.com/fwlink/?LinkId=616616',
            'http://go.microsoft.com/fwlink/?LinkID=616614',
        )
        return
    # Modern interpreters: try get-pip.py first, fall back to source install.
    try:
        install_from_pip('https://go.microsoft.com/fwlink/?LinkId=616663')
        return
    except Exception:
        pass
    print('\nFailed to install. Attempting direct download.')
    install_from_source(
        'http://go.microsoft.com/fwlink/?LinkId=317603',
        'http://go.microsoft.com/fwlink/?LinkId=317604',
    )
def _restart_with_x_frames():
if '--no-ipy-restart' in sys.argv:
print('-X:Frames failed to add _getframe method. Aborting')
return -3
print('Restarting IronPython with -X:Frames')
sys.stdout.flush()
return subprocess.call([sys.executable, '-X:Frames', __file__, '--no-ipy-restart'])
if __name__ == '__main__':
    # IronPython ('cli') needs -X:Frames for pip to work; when sys._getframe
    # is missing, relaunch this script with that flag and exit with its code.
    if sys.platform == 'cli' and not hasattr(sys, '_getframe'):
        sys.exit(_restart_with_x_frames())
    # If pip is already importable there is nothing to do.
    try:
        import pip
    except ImportError:
        pass
    else:
        print('pip is already available.')
        sys.exit(0)
    # main() returns None on success or a negative code on failure.
    sys.exit(int(main() or 0))
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
# Type-only imports and aliases; never executed at runtime.
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
    T = TypeVar('T')
    # Signature of the optional ``cls`` callback accepted by every operation:
    # (pipeline_response, deserialized_model, response_headers) -> Any.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations(object):
    """VpnGatewaysOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias so callers can reach the model classes through the operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for model <-> wire translation.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VpnGateway"
        """Retrieves the details of a virtual wan vpn gateway.
        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VpnGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_08_01.models.VpnGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        # Default mapping of auth/not-found/conflict status codes to typed
        # exceptions; callers may extend it via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for GET; everything else raises.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    # Sends the initial PUT of the create-or-update long-running operation;
    # ``begin_create_or_update`` wraps this with polling.
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.VpnGateway"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VpnGateway"
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry a VpnGateway body.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.VpnGateway"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnGateway"]
        """Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
         gateway.
        :type vpn_gateway_parameters: ~azure.mgmt.network.v2018_08_01.models.VpnGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Without a continuation token we must issue the initial PUT ourselves;
        # with one, the poller is rebuilt from the saved state instead.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                vpn_gateway_parameters=vpn_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into a VpnGateway.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        # polling=True -> default ARM polling, False -> no polling,
        # anything else is treated as a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    # Sends the initial PATCH of the update-tags long-running operation;
    # ``begin_update_tags`` wraps this with polling.
    def _update_tags_initial(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VpnGateway"
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both accepted status codes carry an updated VpnGateway body.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnGateway"]
        """Updates virtual wan vpn gateway tags.
        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
        :type vpn_gateway_parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Issue the initial PATCH only when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                vpn_gateway_parameters=vpn_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into a VpnGateway.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    # Sends the initial DELETE of the delete long-running operation; the
    # response has no body, so only status codes are checked.
    def _delete_initial(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all acceptable outcomes for a delete request.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a virtual wan vpn gateway.
        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Issue the initial DELETE only when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete produces no body; only the optional cls hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
        """Lists all the VpnGateways in a resource group.
        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ListVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request or a follow-up request to
            # the server-provided next_link URL.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Pull the page items and the link to the next page (if any).
            deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
        """Lists all the VpnGateways in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ListVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request or a follow-up request to
            # the server-provided next_link URL.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Pull the page items and the link to the next page (if any).
            deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
|
|
# Plots are currently included as images, because example is too big to
# run on readthedocs servers
"""
Multiatlas Segmentation
========================
This example shows how to perform multi-atlas segmentation based on MP2RAGE
data by performing the following steps:
1. Downloading three open MP2RAGE datasets using
:func:`nighres.data.download_7T_TRT`
2. Remove the skull and create a brain mask using
:func:`nighres.brain.mp2rage_skullstripping`
3. Atlas-guided tissue classification using MGDMfor first two subjects to
be used as an atlas using :func:`nighres.brain.mgdm_segmentation` [1]_
4. Co-register non-linearly the atlas brains the the third subject using
:func:`nighres.registration.embedded_antspy` [2]_
5. Deform segmentation labels using
:func:`nighres.registration.apply_deformation`
6. Turn individual labels into levelset surfaces using
:func:`nighres.surface.probability_to_levelset`
7. Build a final shape average using
:func: `nighres.shape.levelset_fusion`
Important note: this example is both computationally expensive (recomputing
everything from basic inputs) and practically pointless (a direct MGDM
segmentation or a multi-atlas approach with manually defined labels and more
subjects would both be meaningful). This example is only meant as illustration.
"""
############################################################################
# Import and download
# -------------------
# First we import ``nighres`` and the ``os`` module to set the output directory
# Make sure to run this file in a directory you have write access to, or
# change the ``out_dir`` variable below.
import nighres
import os
import nibabel as nb
in_dir = os.path.join(os.getcwd(), 'nighres_examples/data_sets')
out_dir = os.path.join(os.getcwd(), 'nighres_examples/multiatlas_segmentation')
############################################################################
# We also try to import Nilearn plotting functions. If Nilearn is not
# installed, plotting will be skipped.
# Plotting is optional: when Nilearn is not installed every visualisation
# step below is skipped instead of failing the whole example.
try:
    from nilearn import plotting
    skip_plots = False
except ImportError:
    skip_plots = True
    print('Nilearn could not be imported, plotting will be skipped')
############################################################################
# Now we download an example MP2RAGE dataset. It is the structural scan of the
# first subject, first session of the 7T Test-Retest dataset published by
# Gorgolewski et al (2015) [3]_.
# Each call returns a dictionary of file paths; the keys used later in this
# example are 'inv2', 't1w' and 't1map'.
dataset1 = nighres.data.download_7T_TRT(in_dir, subject_id='sub001_sess1')
dataset2 = nighres.data.download_7T_TRT(in_dir, subject_id='sub002_sess1')
dataset3 = nighres.data.download_7T_TRT(in_dir, subject_id='sub003_sess1')
############################################################################
# Skull stripping
# ----------------
# First we perform skull stripping. Only the second inversion image is required
# to calculate the brain mask. But if we input the T1map and T1w image as well,
# they will be masked for us. We also save the outputs in the ``out_dir``
# specified above and use a subject ID as the base file_name.
# The returned dictionaries contain (at least) 'brain_mask', 't1w_masked'
# and 't1map_masked' entries, which are all used further below.
skullstripping_results1 = nighres.brain.mp2rage_skullstripping(
    second_inversion=dataset1['inv2'],
    t1_weighted=dataset1['t1w'],
    t1_map=dataset1['t1map'],
    save_data=True,
    file_name='sub001_sess1',
    output_dir=out_dir)
skullstripping_results2 = nighres.brain.mp2rage_skullstripping(
    second_inversion=dataset2['inv2'],
    t1_weighted=dataset2['t1w'],
    t1_map=dataset2['t1map'],
    save_data=True,
    file_name='sub002_sess1',
    output_dir=out_dir)
skullstripping_results3 = nighres.brain.mp2rage_skullstripping(
    second_inversion=dataset3['inv2'],
    t1_weighted=dataset3['t1w'],
    t1_map=dataset3['t1map'],
    save_data=True,
    file_name='sub003_sess1',
    output_dir=out_dir)
############################################################################
# .. tip:: in Nighres functions that have several outputs return a
# dictionary storing the different outputs. You can find the keys in the
# docstring by typing ``nighres.brain.mp2rage_skullstripping?`` or list
# them with ``skullstripping_results.keys()``
#
# To check if the skull stripping worked well we plot the brain mask on top of
# the original image. You can also open the images stored in ``out_dir`` in
# your favourite interactive viewer and scroll through the volume.
#
# Like Nilearn, we use Nibabel SpatialImage objects to pass data internally.
# Therefore, we can directly plot the outputs using `Nilearn plotting functions
# <http://nilearn.github.io/plotting/index.html#different-plotting-functions>`_
# .
if not skip_plots:
    # Overlay each brain mask on the corresponding anatomical image to
    # visually check the skull stripping.
    # NOTE(review): subject 1 is shown on the T1 map while subjects 2 and 3
    # use the T1w image — possibly unintentional; confirm.
    plotting.plot_roi(skullstripping_results1['brain_mask'], dataset1['t1map'],
                      annotate=False, black_bg=False, draw_cross=False,
                      cmap='autumn')
    plotting.plot_roi(skullstripping_results2['brain_mask'], dataset2['t1w'],
                      annotate=False, black_bg=False, draw_cross=False,
                      cmap='autumn')
    plotting.plot_roi(skullstripping_results3['brain_mask'], dataset3['t1w'],
                      annotate=False, black_bg=False, draw_cross=False,
                      cmap='autumn')
############################################################################
#############################################################################
# MGDM classification
# ---------------------
# Next, we use MGDM to estimate anatomical labels from subjects 1 and 2
# Only subjects 1 and 2 are segmented here; their label maps serve as the
# "atlas" that is later propagated to subject 3.
mgdm_results1 = nighres.brain.mgdm_segmentation(
    contrast_image1=skullstripping_results1['t1w_masked'],
    contrast_type1="Mp2rage7T",
    contrast_image2=skullstripping_results1['t1map_masked'],
    contrast_type2="T1map7T",
    save_data=True, file_name="sub001_sess1",
    output_dir=out_dir)
mgdm_results2 = nighres.brain.mgdm_segmentation(
    contrast_image1=skullstripping_results2['t1w_masked'],
    contrast_type1="Mp2rage7T",
    contrast_image2=skullstripping_results2['t1map_masked'],
    contrast_type2="T1map7T",
    save_data=True, file_name="sub002_sess1",
    output_dir=out_dir)
############################################################################
# Now we look at the topology-constrained segmentation MGDM created
if not skip_plots:
    # Display both MGDM label maps; vmin/vmax fix a common value range so
    # the two figures use the same color scale.
    plotting.plot_img(mgdm_results1['segmentation'],
                      vmin=1, vmax=50, cmap='cubehelix', colorbar=True,
                      annotate=False, draw_cross=False)
    plotting.plot_img(mgdm_results2['segmentation'],
                      vmin=1, vmax=50, cmap='cubehelix', colorbar=True,
                      annotate=False, draw_cross=False)
    plotting.show()
############################################################################
#############################################################################
# SyN co-registration
# ---------------------
# Next, we use the masked data as input for co-registration. The T1 maps are
# used here as they are supposed to be more similar
# NOTE(review): despite the "SyN co-registration" section title, only a
# rigid registration is run here (run_affine=False, run_syn=False).
ants_results1 = nighres.registration.embedded_antspy(
    source_image=skullstripping_results1['t1map_masked'],
    target_image=skullstripping_results3['t1map_masked'],
    run_rigid=True, run_affine=False, run_syn=False,
    coarse_iterations=40,
    medium_iterations=0, fine_iterations=0,
    cost_function='MutualInformation',
    interpolation='NearestNeighbor',
    ignore_affine=True,
    save_data=True, file_name="sub001_sess1",
    output_dir=out_dir)
ants_results2 = nighres.registration.embedded_antspy(
    source_image=skullstripping_results2['t1map_masked'],
    target_image=skullstripping_results3['t1map_masked'],
    run_rigid=True, run_affine=False, run_syn=False,
    coarse_iterations=40,
    medium_iterations=0, fine_iterations=0,
    cost_function='MutualInformation',
    interpolation='NearestNeighbor',
    ignore_affine=True,
    save_data=True, file_name="sub002_sess1",
    output_dir=out_dir)
############################################################################
# Now we look at the coregistered image that SyN created
if not skip_plots:
    # Visual check of the co-registered source images in subject 3 space.
    plotting.plot_img(ants_results1['transformed_source'],
                      annotate=False, draw_cross=False)
    plotting.plot_img(ants_results2['transformed_source'],
                      annotate=False, draw_cross=False)
############################################################################
#############################################################################
# Apply deformations to segmentations
# ------------------------------------
# We use the computed deformation to transform MGDM segmentations
# 'mapping' holds the coordinate mapping computed by the registration above;
# applying it resamples each MGDM label image into subject 3 space.
deformed1 = nighres.registration.apply_coordinate_mappings(
    image=mgdm_results1['segmentation'],
    mapping1=ants_results1['mapping'],
    save_data=True, file_name="sub001_sess1_seg",
    output_dir=out_dir)
deformed2 = nighres.registration.apply_coordinate_mappings(
    image=mgdm_results2['segmentation'],
    mapping1=ants_results2['mapping'],
    save_data=True, file_name="sub002_sess1_seg",
    output_dir=out_dir)
############################################################################
# Now we look at the segmentations deformed by SyN
if not skip_plots:
    # The deformed label maps should now roughly match subject 3 anatomy.
    plotting.plot_img(deformed1['result'],
                      annotate=False, draw_cross=False)
    plotting.plot_img(deformed2['result'],
                      annotate=False, draw_cross=False)
    plotting.show()
############################################################################
#############################################################################
# Transform a selected label into levelset representation
# ---------------------------------------------------------
# We use the deformed MGDM segmentations
# label 32 = left caudate
# Binarise the deformed label maps: label 32 is the left caudate (see the
# comment above).
img1 = nighres.io.load_volume(deformed1['result'])
struct1 = nb.Nifti1Image((img1.get_fdata()==32).astype(float),
                         img1.affine, img1.header)
img2 = nighres.io.load_volume(deformed2['result'])
struct2 = nb.Nifti1Image((img2.get_fdata()==32).astype(float),
                         img2.affine, img2.header)
# Convert each binary mask into a levelset image (negative values lie inside
# the structure, cf. the `< 0` thresholding in the plotting code below).
levelset1 = nighres.surface.probability_to_levelset(
    probability_image=struct1,
    save_data=True, file_name="sub001_sess1_struct",
    output_dir=out_dir)
levelset2 = nighres.surface.probability_to_levelset(
    probability_image=struct2,
    save_data=True, file_name="sub002_sess1_struct",
    output_dir=out_dir)
# Fuse the two individual levelsets into a single average shape.
final_seg = nighres.shape.levelset_fusion(levelset_images=[levelset1['result'],
                                          levelset2['result']],
                                          correct_topology=True,
                                          save_data=True, file_name="sub003_sess1_struct_seg",
                                          output_dir=out_dir)
############################################################################
# Now we look at the final segmentation from shape fusion
if not skip_plots:
    # The two deformed single-subject shapes and their fused average are all
    # displayed with the same recipe, so iterate over the levelset images in
    # the original plotting order. Voxels with a negative levelset value lie
    # inside the structure.
    for levelset_image in (levelset1['result'], levelset2['result'],
                           final_seg['result']):
        img = nighres.io.load_volume(levelset_image)
        mask = nb.Nifti1Image((img.get_fdata() < 0).astype(int),
                              img.affine, img.header)
        plotting.plot_roi(mask, dataset3['t1map'],
                          annotate=False, black_bg=False, draw_cross=False,
                          cmap='autumn')
############################################################################
#############################################################################
# If the example is not run in a jupyter notebook, render the plots:
# Open all figures created above when running as a plain script.
if not skip_plots:
    plotting.show()
#############################################################################
# References
# -----------
# .. [1] Bogovic, Prince and Bazin (2013). A multiple object geometric
# deformable model for image segmentation. DOI: 10.1016/j.cviu.2012.10.006
# .. [2] Avants et al (2008). Symmetric diffeomorphic image registration with
# cross-correlation: evaluating automated labeling of elderly and
# neurodegenerative brain. DOI: 10.1016/j.media.2007.06.004
# .. [3] Gorgolewski et al (2015). A high resolution 7-Tesla resting-state fMRI
# test-retest dataset with cognitive and physiological measures.
# DOI: 10.1038/sdata.2014.54
|
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot import utils
from robot.errors import DataError, FrameworkError
from robot.output import LOGGER, loggerhelper
class _BaseSettings(object):
_cli_opts = {'Name' : ('name', None),
'Doc' : ('doc', None),
'Metadata' : ('metadata', []),
'TestNames' : ('test', []),
'SuiteNames' : ('suite', []),
'SetTag' : ('settag', []),
'Include' : ('include', []),
'Exclude' : ('exclude', []),
'Critical' : ('critical', None),
'NonCritical' : ('noncritical', None),
'OutputDir' : ('outputdir', utils.abspath('.')),
'Log' : ('log', 'log.html'),
'Report' : ('report', 'report.html'),
'XUnitFile' : ('xunitfile', 'NONE'),
'SplitLog' : ('splitlog', False),
'TimestampOutputs' : ('timestampoutputs', False),
'LogTitle' : ('logtitle', None),
'ReportTitle' : ('reporttitle', None),
'ReportBackground' : ('reportbackground',
('#99FF66', '#99FF66', '#FF3333')),
'SuiteStatLevel' : ('suitestatlevel', -1),
'TagStatInclude' : ('tagstatinclude', []),
'TagStatExclude' : ('tagstatexclude', []),
'TagStatCombine' : ('tagstatcombine', []),
'TagDoc' : ('tagdoc', []),
'TagStatLink' : ('tagstatlink', []),
'RemoveKeywords' : ('removekeywords', []),
'NoStatusRC' : ('nostatusrc', False),
'MonitorWidth' : ('monitorwidth', 78),
'MonitorColors' : ('monitorcolors', 'AUTO'),
'StdOut' : ('stdout', None),
'StdErr' : ('stderr', None)}
_output_opts = ['Output', 'Log', 'Report', 'DebugFile', 'XUnitFile']
def __init__(self, options=None, log=True):
self._opts = {}
self._cli_opts = self._cli_opts.copy()
self._cli_opts.update(self._extra_cli_opts)
self._process_cli_opts(options or {}, log)
if log: LOGGER.info('Settings:\n%s' % unicode(self))
def _process_cli_opts(self, opts, log):
for name, (cli_name, default) in self._cli_opts.items():
value = opts.get(cli_name, default)
if value in [None, []]:
value = default
elif default == [] and isinstance(value, basestring):
value = [value]
self[name] = self._process_value(name, value, log)
def __setitem__(self, name, value):
if name not in self._cli_opts:
raise KeyError("Non-existing settings '%s'" % name)
self._opts[name] = value
def _process_value(self, name, value, log):
if name == 'LogLevel':
return self._process_log_level(value)
if value == self._get_default_value(name):
return value
if name in ['Name', 'Doc', 'LogTitle', 'ReportTitle']:
if name == 'Doc': value = self._escape(value)
return value.replace('_', ' ')
if name in ['Metadata', 'TagDoc']:
if name == 'Metadata': value = [self._escape(v) for v in value]
return [self._process_metadata_or_tagdoc(v) for v in value]
if name in ['Include', 'Exclude']:
return [v.replace('AND', '&').replace('_', ' ') for v in value]
if name in self._output_opts and utils.eq(value, 'NONE'):
return 'NONE'
if name == 'OutputDir':
return utils.abspath(value)
if name in ['SuiteStatLevel', 'MonitorWidth']:
return self._convert_to_positive_integer_or_default(name, value)
if name in ['Listeners', 'VariableFiles']:
return [self._split_args_from_name_or_path(item) for item in value]
if name == 'ReportBackground':
return self._process_report_background(value)
if name == 'TagStatCombine':
return [self._process_tag_stat_combine(v) for v in value]
if name == 'TagStatLink':
return [v for v in [self._process_tag_stat_link(v) for v in value] if v]
if name == 'RemoveKeywords':
return [v.upper() for v in value]
return value
def _process_log_level(self, level):
level, visible_level = self._split_log_level(level.upper())
self._opts['VisibleLogLevel'] = visible_level
return level
def _split_log_level(self, level):
if ':' in level:
level, visible_level = level.split(':', 1)
else:
visible_level = level
self._validate_log_level_and_default(level, visible_level)
return level, visible_level
def _validate_log_level_and_default(self, log_level, default):
if log_level not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % log_level)
if default not in loggerhelper.LEVELS:
raise DataError("Invalid log level '%s'" % default)
if not loggerhelper.IsLogged(log_level)(default):
raise DataError("Default visible log level '%s' is lower than "
"log level '%s'" % (default, log_level))
def __getitem__(self, name):
if name not in self._opts:
raise KeyError("Non-existing setting '%s'" % name)
if name in self._output_opts:
return self._get_output_file(name)
return self._opts[name]
def _get_output_file(self, type_):
"""Returns path of the requested output file and creates needed dirs.
`type_` can be 'Output', 'Log', 'Report', 'DebugFile' or 'XUnitFile'.
"""
name = self._opts[type_]
if self._outputfile_disabled(type_, name):
return 'NONE'
name = self._process_output_name(name, type_)
path = utils.abspath(os.path.join(self['OutputDir'], name))
self._create_output_dir(os.path.dirname(path), type_)
return path
def _process_output_name(self, name, type_):
base, ext = os.path.splitext(name)
if self['TimestampOutputs']:
base = '%s-%s' % (base, utils.get_start_timestamp('', '-', ''))
ext = self._get_output_extension(ext, type_)
return base + ext
def _get_output_extension(self, ext, type_):
if ext != '':
return ext
if type_ in ['Output', 'XUnitFile']:
return '.xml'
if type_ in ['Log', 'Report']:
return '.html'
if type_ == 'DebugFile':
return '.txt'
raise FrameworkError("Invalid output file type: %s" % type_)
def _create_output_dir(self, path, type_):
try:
if not os.path.exists(path):
os.makedirs(path)
except EnvironmentError, err:
raise DataError("Creating %s file directory '%s' failed: %s"
% (type_.lower(), path, err.strerror))
def _process_metadata_or_tagdoc(self, value):
value = value.replace('_', ' ')
if ':' in value:
return value.split(':', 1)
return value, ''
def _process_report_background(self, colors):
if colors.count(':') not in [1, 2]:
LOGGER.error("Invalid report background colors '%s'." % colors)
return self._get_default_value('ReportBackground')
colors = colors.split(':')
if len(colors) == 2:
return colors[0], colors[0], colors[1]
return tuple(colors)
def _process_tag_stat_combine(self, value):
for replwhat, replwith in [('AND', '&'), ('&', ' & '), ('NOT', ' NOT ')]:
value = value.replace(replwhat, replwith)
if ':' not in value:
return value, ''
pattern, title = value.rsplit(':', 1)
return pattern, title.replace('_', ' ')
def _process_tag_stat_link(self, value):
tokens = value.split(':')
if len(tokens) >= 3:
return tokens[0], ':'.join(tokens[1:-1]), tokens[-1]
LOGGER.error("Invalid format for option '--tagstatlink'. "
"Expected 'tag:link:title' but got '%s'." % value)
return None
def _convert_to_positive_integer_or_default(self, name, value):
value = self._convert_to_integer(name, value)
return value if value > 0 else self._get_default_value(name)
def _convert_to_integer(self, name, value):
try:
return int(value)
except ValueError:
LOGGER.error("Option '--%s' expected integer value but got '%s'. "
"Default value used instead." % (name.lower(), value))
return self._get_default_value(name)
def _get_default_value(self, name):
return self._cli_opts[name][1]
def _split_args_from_name_or_path(self, name):
if ':' not in name or os.path.exists(name):
args = []
else:
args = name.split(':')
name = args.pop(0)
# Handle absolute Windows paths with arguments
if len(name) == 1 and args[0].startswith(('/', '\\')):
name = name + ':' + args.pop(0)
if os.path.exists(name):
name = os.path.abspath(name)
return name, args
def __contains__(self, setting):
return setting in self._cli_opts
def __unicode__(self):
return '\n'.join('%s: %s' % (name, self._opts[name])
for name in sorted(self._opts))
@property
def output(self):
return self._get_file('Output')
@property
def log(self):
return self._get_file('Log')
@property
def report(self):
return self._get_file('Report')
@property
def xunit(self):
return self._get_file('XUnitFile')
def _get_file(self, name):
value = self[name]
return value if value != 'NONE' else None
@property
def split_log(self):
return self['SplitLog']
@property
def status_rc(self):
return not self['NoStatusRC']
class RobotSettings(_BaseSettings):
    """Settings used when executing tests; extends `_BaseSettings`."""
    _extra_cli_opts = {'Output' : ('output', 'output.xml'),
                       'LogLevel' : ('loglevel', 'INFO'),
                       'RunMode' : ('runmode', []),
                       'RunEmptySuite' : ('runemptysuite', False),
                       'WarnOnSkipped' : ('warnonskippedfiles', False),
                       'Variables' : ('variable', []),
                       'VariableFiles' : ('variablefile', []),
                       'Listeners' : ('listener', []),
                       'DebugFile' : ('debugfile', 'NONE')}

    def is_rebot_needed(self):
        # Post-processing is needed unless log, report and xunit output are
        # all disabled. Comparisons short-circuit exactly like the previous
        # chained form, so output files are resolved in the same order.
        return not (self['Log'] == 'NONE' and
                    self['Report'] == 'NONE' and
                    self['XUnitFile'] == 'NONE')

    def get_rebot_datasource_and_settings(self):
        """Return the output xml path and settings for the internal rebot run."""
        datasource = self['Output']
        settings = RebotSettings(log=False)
        settings._opts.update(self._opts)
        # Execution-only options make no sense when post-processing results.
        for name in ['Variables', 'VariableFiles', 'Listeners']:
            settings._opts.pop(name)
        # Filtering and renaming were already applied during execution.
        for name in ['Include', 'Exclude', 'TestNames', 'SuiteNames', 'Metadata']:
            settings._opts[name] = []
        for name in ['Name', 'Doc']:
            settings._opts[name] = None
        settings._opts['Output'] = 'NONE'
        settings._opts['LogLevel'] = 'TRACE'
        settings._opts['ProcessEmptySuite'] = self['RunEmptySuite']
        return datasource, settings

    def _outputfile_disabled(self, type_, name):
        # A file is disabled individually, or implicitly (except the debug
        # file) when the output xml itself is 'NONE'.
        return (name == 'NONE' or
                (self._opts['Output'] == 'NONE' and type_ != 'DebugFile'))

    def _escape(self, value):
        return utils.escape(value)
class RebotSettings(_BaseSettings):
    """Settings used when post-processing outputs; extends `_BaseSettings`."""
    _extra_cli_opts = {'Output' : ('output', 'NONE'),
                       'LogLevel' : ('loglevel', 'TRACE'),
                       'ProcessEmptySuite' : ('processemptysuite', False),
                       'StartTime' : ('starttime', None),
                       'EndTime' : ('endtime', None)}

    def _outputfile_disabled(self, type_, name):
        # When post-processing, only an explicit 'NONE' disables a file.
        return name == 'NONE'

    def _escape(self, value):
        # Unlike RobotSettings, rebot uses the given value unmodified.
        return value

    @property
    def suite_config(self):
        """Options controlling how the result suite is modified."""
        return dict(name=self['Name'],
                    doc=self['Doc'],
                    metadata=dict(self['Metadata']),
                    set_tags=self['SetTag'],
                    include_tags=self['Include'],
                    exclude_tags=self['Exclude'],
                    include_suites=self['SuiteNames'],
                    include_tests=self['TestNames'],
                    process_empty_suite=self['ProcessEmptySuite'],
                    remove_keywords=self['RemoveKeywords'],
                    log_level=self['LogLevel'],
                    critical=self['Critical'],
                    noncritical=self['NonCritical'],
                    starttime=self['StartTime'],
                    endtime=self['EndTime'])

    @property
    def statistics_config(self):
        """Options controlling statistics calculation."""
        return dict(suite_stat_level=self['SuiteStatLevel'],
                    tag_stat_include=self['TagStatInclude'],
                    tag_stat_exclude=self['TagStatExclude'],
                    tag_stat_combine=self['TagStatCombine'],
                    tag_stat_link=self['TagStatLink'],
                    tag_doc=self['TagDoc'])

    @property
    def log_config(self):
        """Configuration embedded into the generated log file."""
        if not self.log:
            return {}
        return dict(title=self['LogTitle'],
                    reportURL=self._url_from_path(self.log, self.report),
                    splitLogBase=os.path.basename(os.path.splitext(self.log)[0]),
                    defaultLevel=self['VisibleLogLevel'])

    @property
    def report_config(self):
        """Configuration embedded into the generated report file."""
        if not self.report:
            return {}
        return dict(title=self['ReportTitle'],
                    logURL=self._url_from_path(self.report, self.log),
                    background=self._resolve_background_colors())

    def _url_from_path(self, source, destination):
        # Relative link from `source`'s directory to `destination`.
        if destination:
            return utils.get_link_path(destination, os.path.dirname(source))
        return None

    def _resolve_background_colors(self):
        # 'ReportBackground' is always a (pass, non-critical, fail) triple.
        pass_color, noncrit_color, fail_color = self['ReportBackground']
        return {'pass': pass_color,
                'nonCriticalFail': noncrit_color,
                'fail': fail_color}
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  # op.inputs = [input_sizes, filter, out_backprop]; the shape vector at
  # index 0 gets no gradient.
  filter_grad = nn_ops.conv2d_backprop_filter(
      grad, array_ops.shape(op.inputs[1]), op.inputs[2], strides, padding,
      use_cudnn_on_gpu, data_format)
  out_backprop_grad = nn_ops.conv2d(grad, op.inputs[1], strides, padding,
                                    use_cudnn_on_gpu, data_format)
  return [None, filter_grad, out_backprop_grad]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
  """Gradients for Conv2DBackpropFilter.

  op.inputs = [input, filter_sizes, out_backprop]; the filter_sizes shape
  vector gets no gradient (None below).
  """
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
  """Gradients for Conv3D w.r.t. its input (index 0) and filter (index 1)."""
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  input_grad = nn_ops.conv3d_backprop_input_v2(
      array_ops.shape(op.inputs[0]), op.inputs[1], grad,
      strides=strides, padding=padding)
  filter_grad = nn_ops.conv3d_backprop_filter_v2(
      op.inputs[0], array_ops.shape(op.inputs[1]), grad,
      strides=strides, padding=padding)
  return [input_grad, filter_grad]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
  """Gradients for Conv3DBackpropInputV2; the input_sizes shape vector
  (op.inputs[0]) gets no gradient (None below)."""
  return [None,
          nn_ops.conv3d_backprop_filter_v2(grad,
                                           array_ops.shape(op.inputs[1]),
                                           op.inputs[2],
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding")),
          nn_ops.conv3d(grad,
                        op.inputs[1],
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"))]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
  """Gradients for Conv3DBackpropFilterV2; the filter_sizes shape vector
  (op.inputs[1]) gets no gradient (None below)."""
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          grad,
                                          op.inputs[2],
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding")),
          None,
          nn_ops.conv3d(op.inputs[0],
                        grad,
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"))]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
  """Gradient for AvgPool3D w.r.t. its single input."""
  return nn_ops.avg_pool3d_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      ksize=op.get_attr("ksize"),
      strides=op.get_attr("strides"),
      padding=op.get_attr("padding"))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
  """Gradient for MaxPool3D; the grad kernel receives both the original
  input and the pooled output (op.outputs[0])."""
  return nn_ops.max_pool3d_grad(op.inputs[0],
                                op.outputs[0],
                                grad,
                                ksize=op.get_attr("ksize"),
                                strides=op.get_attr("strides"),
                                padding=op.get_attr("padding"))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  With probs of shape [batch_size, dim], d softmax / dx is
  diag(softmax) - softmax * softmax', i.e. diagonal minus a rank one
  matrix, so the backprop reduces to

      grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  Args:
    op: the Softmax op.
    grad_softmax: the tensor representing the gradient w.r.t. the
      softmax output.

  Returns:
    gradient w.r.t the input to the softmax
  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time? Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  # Per-row inner product <grad_softmax, softmax>, reshaped to a
  # [batch, 1] column so it broadcasts against the [batch, dim] tensors.
  row_sums = array_ops.reshape(
      math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])
  return (grad_softmax - row_sums) * softmax
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
  """The gradient for log_softmax.

      log_softmax = input - log(sum(exp(input)))
      dlog_softmax/dinput = diag - softmax(input)

  Args:
    op: The log softmax op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # The op's output is log_softmax, so exp() recovers the softmax itself.
  softmax = math_ops.exp(op.outputs[0])
  return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.)  Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # NOTE(review): presumably get_attr raises ValueError for graphs created
    # before the data_format attr existed — confirm. None lets the grad
    # kernel use its default format.
    data_format = None
  return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
                                                  data_format=data_format))
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The tensor input's gradient is the received gradient unchanged. The bias
  vector has one fewer dimension than `received_grad`, so its gradient is
  the received gradient summed over every dimension except the last one.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors: the gradient for the "tensor" input and the gradient for
    the "bias" input of the BiasOp.
  """
  all_but_last_dim = math_ops.range(array_ops.rank(received_grad) - 1)
  bias_grad = math_ops.reduce_sum(received_grad, all_but_last_dim)
  return (received_grad, bias_grad)
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
  # The grad kernel is given the op's *output* (op.outputs[0]).
  return gen_nn_ops._relu_grad(grad, op.outputs[0])


@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
  # Unlike Relu, the grad kernel takes the original *input* (op.inputs[0]).
  return gen_nn_ops._relu6_grad(grad, op.inputs[0])


@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
  # Like Relu, the grad kernel takes the op's output.
  return gen_nn_ops._elu_grad(grad, op.outputs[0])


@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
  return gen_nn_ops._softplus_grad(grad, op.inputs[0])


@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
  return gen_nn_ops._softsign_grad(grad, op.inputs[0])


@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
  """Second-order gradient for Relu: gradients w.r.t. ReluGrad's two
  inputs. The second input receives a zero gradient (zeros below)."""
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
      shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Reshaping vec to a [D0, 1] column makes the product broadcast along D1.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for SoftmaxCrossEntropyWithLogits w.r.t. the logits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1]); the incoming gradient for output[1] is ignored.
  # There is no gradient for the labels, hence the trailing None.
  return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for SparseSoftmaxCrossEntropyWithLogits w.r.t. the logits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1]); the incoming gradient for output[1] is ignored.
  # There is no gradient for the labels, hence the trailing None.
  return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
  """Gradients for Conv2D w.r.t. its input (index 0) and filter (index 1)."""
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  input_grad = nn_ops.conv2d_backprop_input(
      array_ops.shape(op.inputs[0]), op.inputs[1], grad, strides, padding,
      use_cudnn_on_gpu, data_format)
  filter_grad = nn_ops.conv2d_backprop_filter(
      op.inputs[0], array_ops.shape(op.inputs[1]), grad, strides, padding,
      use_cudnn_on_gpu, data_format)
  return [input_grad, filter_grad]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
  """Gradients for DepthwiseConv2dNative w.r.t. input and filter."""
  return [
      nn_ops.depthwise_conv2d_native_backprop_input(
          array_ops.shape(op.inputs[0]), op.inputs[1], grad,
          op.get_attr("strides"), op.get_attr("padding")),
      nn_ops.depthwise_conv2d_native_backprop_filter(
          op.inputs[0], array_ops.shape(op.inputs[1]), grad,
          op.get_attr("strides"), op.get_attr("padding"))
  ]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
  """Gradients for morphological Dilation2D w.r.t. input and filter."""
  return [nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
                                           op.get_attr("strides"),
                                           op.get_attr("rates"),
                                           op.get_attr("padding")),
          nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
                                            op.get_attr("strides"),
                                            op.get_attr("rates"),
                                            op.get_attr("padding"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
  """Gradient for local response normalization; the grad kernel needs the
  op's input, its output and all four LRN attributes."""
  depth_radius = op.get_attr("depth_radius")
  bias = op.get_attr("bias")
  alpha = op.get_attr("alpha")
  beta = op.get_attr("beta")
  return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius,
                               bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
  """Gradient for AvgPool w.r.t. its single input."""
  return gen_nn_ops._avg_pool_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
  """Gradient for MaxPool; the grad kernel receives both the original input
  and the pooled output (op.outputs[0])."""
  return gen_nn_ops._max_pool_grad(op.inputs[0],
                                   op.outputs[0],
                                   grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format"))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalMaxPool.

  Since FractionalMaxPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalMaxPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalMaxPool op.
  """
  # pylint: disable=protected-access
  # The row/col pooling sequences (outputs 1 and 2) are passed along to the
  # grad kernel together with the 'overlapping' attribute.
  return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
                                              grad_0, op.outputs[1],
                                              op.outputs[2],
                                              op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalAvgPool.

  FractionalAvgPool produces three outputs (pooled values, row_seq,
  col_seq), so three gradients arrive here; only the one flowing into the
  first output carries information, the other two are empty.

  Args:
    op: The FractionalAvgPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalAvgPool op.
  """
  # pylint: disable=protected-access
  # Unlike the max-pool variant, this kernel only needs the static shape of
  # the original input, not the tensor itself.
  return gen_nn_ops._fractional_avg_pool_grad(
      op.inputs[0].get_shape(), grad_0, op.outputs[1], op.outputs[2],
      op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  Mean and variance intentionally receive no useful backprop signal here,
  as this operation does not train them via backprop.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  # Inputs are (x, mean, variance, beta, gamma); beta (inputs[3]) is not
  # needed by the fused gradient kernel.
  grads = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"),
      op.get_attr("scale_after_normalization"))
  dx, dm, dv, db, dg = grads
  return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad).
  """
  x = op.inputs[0]
  return x * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])
  # Size of the last dimension of the indices output (== k).
  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.pack([-1, ind_lastdim]))
  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  # Each row's per-row indices are offset by row_number * in_lastdim so that
  # they address the flattened input tensor.
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  # The second returned tensor is the (zero) gradient for the indices output;
  # for TopKV2 this also covers the scalar `k` input.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)]
|
|
import re
from django.conf import settings
import six
from rest_framework import exceptions, serializers
from olympia import amo
from olympia.accounts.serializers import BaseUserSerializer
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.api.fields import (
ESTranslationSerializerField, ReverseChoiceField,
TranslationSerializerField)
from olympia.api.serializers import BaseESSerializer
from olympia.api.utils import is_gate_active
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import Collection
from olympia.constants.applications import APPS_ALL
from olympia.constants.base import ADDON_TYPE_CHOICES_API
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.files.models import File
from olympia.search.filters import AddonAppVersionQueryParam
from olympia.users.models import UserProfile
from olympia.versions.models import (
ApplicationsVersions, License, Version, VersionPreview)
from .models import (
Addon, CompatOverride, Persona, Preview, ReplacementAddon, attach_tags)
class FileSerializer(serializers.ModelSerializer):
    """Serializes a File, exposing an absolute download URL and mapping
    platform/status integers to their API string equivalents."""
    url = serializers.SerializerMethodField()
    platform = ReverseChoiceField(
        choices=list(amo.PLATFORM_CHOICES_API.items()))
    status = ReverseChoiceField(choices=list(amo.STATUS_CHOICES_API.items()))
    # WebExtension permissions, exposed as a flat list of strings.
    permissions = serializers.ListField(
        source='webext_permissions_list',
        child=serializers.CharField())
    is_restart_required = serializers.BooleanField()
    class Meta:
        model = File
        fields = ('id', 'created', 'hash', 'is_restart_required',
                  'is_webextension', 'is_mozilla_signed_extension',
                  'platform', 'size', 'status', 'url', 'permissions')
    def get_url(self, obj):
        """Return the download URL for this file."""
        # File.get_url_path() is a little different, it's already absolute, but
        # needs a src parameter that is appended as a query string.
        return obj.get_url_path(src='')
class PreviewSerializer(serializers.ModelSerializer):
    """Serializes a Preview (or VersionPreview) with absolute image URLs."""
    caption = TranslationSerializerField()
    image_url = serializers.SerializerMethodField()
    thumbnail_url = serializers.SerializerMethodField()
    class Meta:
        # Note: this serializer can also be used for VersionPreview.
        model = Preview
        fields = ('id', 'caption', 'image_size', 'image_url', 'thumbnail_size',
                  'thumbnail_url')
    def get_image_url(self, obj):
        """Return the absolute URL of the full-size preview image."""
        return absolutify(obj.image_url)
    def get_thumbnail_url(self, obj):
        """Return the absolute URL of the preview thumbnail."""
        return absolutify(obj.thumbnail_url)
class ESPreviewSerializer(BaseESSerializer, PreviewSerializer):
    # Because we have translated fields and dates coming from ES, we can't use
    # a regular PreviewSerializer to handle previews for ESAddonSerializer.
    # Unfortunately we also need to get the class right (it can be either
    # Preview or VersionPreview) so fake_object() implementation in this class
    # does nothing, the instance has already been created by a parent
    # serializer.
    datetime_fields = ('modified',)
    translated_fields = ('caption',)
    def fake_object(self, data):
        """No-op: the parent serializer already built the instance (see the
        class comment above)."""
        return data
class LicenseSerializer(serializers.ModelSerializer):
    """Serializes a License, resolving name/url from either the license
    constant (builtin licenses) or the database (custom licenses)."""
    is_custom = serializers.SerializerMethodField()
    name = serializers.SerializerMethodField()
    text = TranslationSerializerField()
    url = serializers.SerializerMethodField()
    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'text', 'url')
    def __init__(self, *args, **kwargs):
        super(LicenseSerializer, self).__init__(*args, **kwargs)
        # Keep a bound translation field around so get_name() can fall back
        # to the database name when there is no license constant.
        self.db_name = TranslationSerializerField()
        self.db_name.bind('name', self)
    def get_is_custom(self, obj):
        """A license is custom when it is not one of the builtin ones."""
        return not bool(obj.builtin)
    def get_url(self, obj):
        """Return the explicit license URL, or a version-specific one."""
        return obj.url or self.get_version_license_url(obj)
    def get_version_license_url(self, obj):
        # We need the version associated with the license, because that's where
        # the license_url() method lives. The problem is, normally we would not
        # be able to do that, because there can be multiple versions for a
        # given License. However, since we're serializing through a nested
        # serializer, we cheat and use `instance.version_instance` which is
        # set by SimpleVersionSerializer.to_representation() while serializing.
        # Only get the version license url for non-builtin licenses.
        if not obj.builtin and hasattr(obj, 'version_instance'):
            return absolutify(obj.version_instance.license_url())
        return None
    def get_name(self, obj):
        # See if there is a license constant
        license_constant = obj._constant
        if not license_constant:
            # If not fall back on the name in the database.
            return self.db_name.get_attribute(obj)
        else:
            request = self.context.get('request', None)
            if request and request.method == 'GET' and 'lang' in request.GET:
                # A single lang requested so return a flat string
                return six.text_type(license_constant.name)
            else:
                # Otherwise mock the dict with the default lang.
                lang = getattr(request, 'LANG', None) or settings.LANGUAGE_CODE
                return {lang: six.text_type(license_constant.name)}
    def to_representation(self, instance):
        """Drop `is_custom` for API clients behind the relevant gate."""
        data = super(LicenseSerializer, self).to_representation(instance)
        request = self.context.get('request', None)
        if request and is_gate_active(
                request, 'del-version-license-is-custom'):
            data.pop('is_custom', None)
        return data
class CompactLicenseSerializer(LicenseSerializer):
    """LicenseSerializer without the potentially large `text` field."""
    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'url')
class MinimalVersionSerializer(serializers.ModelSerializer):
    """Smallest useful Version representation: id, files, reviewed date and
    version string."""
    files = FileSerializer(source='all_files', many=True)
    class Meta:
        model = Version
        fields = ('id', 'files', 'reviewed', 'version')
class SimpleVersionSerializer(MinimalVersionSerializer):
    """Version serializer with compatibility info and a compact license,
    but without the channel field (see VersionSerializer for that)."""
    compatibility = serializers.SerializerMethodField()
    edit_url = serializers.SerializerMethodField()
    is_strict_compatibility_enabled = serializers.SerializerMethodField()
    license = CompactLicenseSerializer()
    release_notes = TranslationSerializerField()
    class Meta:
        model = Version
        fields = ('id', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'license',
                  'release_notes', 'reviewed', 'version')
    def to_representation(self, instance):
        # Help the LicenseSerializer find the version we're currently
        # serializing.
        if 'license' in self.fields and instance.license:
            instance.license.version_instance = instance
        return super(SimpleVersionSerializer, self).to_representation(instance)
    def get_compatibility(self, obj):
        """Return {app short name: {'min': ..., 'max': ...}} for each
        compatible app, with fallbacks when no explicit compat exists."""
        return {
            app.short: {
                'min': compat.min.version if compat else (
                    amo.D2C_MIN_VERSIONS.get(app.id, '1.0')),
                'max': compat.max.version if compat else amo.FAKE_MAX_VERSION
            } for app, compat in obj.compatible_apps.items()
        }
    def get_edit_url(self, obj):
        """Absolute URL of the developer hub edit page for this version."""
        return absolutify(obj.addon.get_dev_url(
            'versions.edit', args=[obj.pk], prefix_only=True))
    def get_is_strict_compatibility_enabled(self, obj):
        # Strict compatibility is a per-file flag; any file having it set
        # makes the whole version strict.
        return any(file_.strict_compatibility for file_ in obj.all_files)
class VersionSerializer(SimpleVersionSerializer):
    """Full Version serializer: adds the channel and the complete license
    (including its text)."""
    channel = ReverseChoiceField(
        choices=list(amo.CHANNEL_CHOICES_API.items()))
    license = LicenseSerializer()
    class Meta:
        model = Version
        fields = ('id', 'channel', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'license',
                  'release_notes', 'reviewed', 'version')
class CurrentVersionSerializer(SimpleVersionSerializer):
    """Serializes an add-on's current version, optionally swapping it for
    the latest app/appversion-compatible version for langpacks."""
    def to_representation(self, obj):
        # If the add-on is a langpack, and `appversion` is passed, try to
        # determine the latest public compatible version and replace the obj
        # with the result. Because of the perf impact, only done for langpacks
        # in the detail API.
        request = self.context.get('request')
        view = self.context.get('view')
        addon = obj.addon
        if (request and request.GET.get('appversion') and
                getattr(view, 'action', None) == 'retrieve' and
                addon.type == amo.ADDON_LPAPP):
            obj = self.get_current_compatible_version(addon)
        return super(CurrentVersionSerializer, self).to_representation(obj)
    def get_current_compatible_version(self, addon):
        """
        Return latest public version compatible with the app & appversion
        passed through the request, or fall back to addon.current_version if
        none is found.

        Only use on langpacks if the appversion parameter is present.

        Raises rest_framework ParseError if the appversion parameter is
        malformed.
        """
        request = self.context.get('request')
        try:
            # AddonAppVersionQueryParam.get_values() returns (app_id, min, max)
            # but we want {'min': min, 'max': max}.
            value = AddonAppVersionQueryParam(request).get_values()
            application = value[0]
            appversions = dict(zip(('min', 'max'), value[1:]))
        except ValueError as exc:
            raise exceptions.ParseError(six.text_type(exc))
        version_qs = Version.objects.latest_public_compatible_with(
            application, appversions).filter(addon=addon)
        return version_qs.first() or addon.current_version
class ESCompactLicenseSerializer(BaseESSerializer, CompactLicenseSerializer):
    """CompactLicenseSerializer variant reading translations from ES data."""
    translated_fields = ('name', )
    def __init__(self, *args, **kwargs):
        super(ESCompactLicenseSerializer, self).__init__(*args, **kwargs)
        # Same trick as LicenseSerializer.__init__, but with the ES-aware
        # translation field.
        self.db_name = ESTranslationSerializerField()
        self.db_name.bind('name', self)
    def fake_object(self, data):
        # We just pass the data as the fake object will have been created
        # before by ESAddonSerializer.fake_version_object()
        return data
class ESCurrentVersionSerializer(BaseESSerializer, CurrentVersionSerializer):
    """CurrentVersionSerializer variant backed by ES-indexed data."""
    license = ESCompactLicenseSerializer()
    datetime_fields = ('reviewed',)
    translated_fields = ('release_notes',)
    def fake_object(self, data):
        # We just pass the data as the fake object will have been created
        # before by ESAddonSerializer.fake_version_object()
        return data
class AddonEulaPolicySerializer(serializers.ModelSerializer):
    """Exposes only the EULA and privacy policy texts of an add-on."""
    eula = TranslationSerializerField()
    privacy_policy = TranslationSerializerField()
    class Meta:
        model = Addon
        fields = (
            'eula',
            'privacy_policy',
        )
class AddonDeveloperSerializer(BaseUserSerializer):
    """BaseUserSerializer plus the developer's picture URL, read-only."""
    picture_url = serializers.SerializerMethodField()
    class Meta(BaseUserSerializer.Meta):
        fields = BaseUserSerializer.Meta.fields + (
            'picture_url',)
        read_only_fields = fields
class AddonSerializer(serializers.ModelSerializer):
    """Full public API representation of an Addon, including its current
    version, authors, previews and (for lightweight themes) theme data."""
    authors = AddonDeveloperSerializer(many=True, source='listed_authors')
    categories = serializers.SerializerMethodField()
    contributions_url = serializers.URLField(source='contributions')
    current_version = CurrentVersionSerializer()
    description = TranslationSerializerField()
    developer_comments = TranslationSerializerField()
    edit_url = serializers.SerializerMethodField()
    has_eula = serializers.SerializerMethodField()
    has_privacy_policy = serializers.SerializerMethodField()
    homepage = TranslationSerializerField()
    icon_url = serializers.SerializerMethodField()
    icons = serializers.SerializerMethodField()
    is_source_public = serializers.BooleanField(source='view_source')
    is_featured = serializers.SerializerMethodField()
    name = TranslationSerializerField()
    previews = PreviewSerializer(many=True, source='current_previews')
    ratings = serializers.SerializerMethodField()
    ratings_url = serializers.SerializerMethodField()
    review_url = serializers.SerializerMethodField()
    status = ReverseChoiceField(choices=list(amo.STATUS_CHOICES_API.items()))
    summary = TranslationSerializerField()
    support_email = TranslationSerializerField()
    support_url = TranslationSerializerField()
    tags = serializers.SerializerMethodField()
    theme_data = serializers.SerializerMethodField()
    type = ReverseChoiceField(choices=list(amo.ADDON_TYPE_CHOICES_API.items()))
    url = serializers.SerializerMethodField()
    class Meta:
        model = Addon
        fields = (
            'id',
            'authors',
            'average_daily_users',
            'categories',
            'contributions_url',
            'created',
            'current_version',
            'default_locale',
            'description',
            'developer_comments',
            'edit_url',
            'guid',
            'has_eula',
            'has_privacy_policy',
            'homepage',
            'icon_url',
            'icons',
            'is_disabled',
            'is_experimental',
            'is_featured',
            'is_recommended',
            'is_source_public',
            'last_updated',
            'name',
            'previews',
            'public_stats',
            'ratings',
            'ratings_url',
            'requires_payment',
            'review_url',
            'slug',
            'status',
            'summary',
            'support_email',
            'support_url',
            'tags',
            'theme_data',
            'type',
            'url',
            'weekly_downloads'
        )
    def to_representation(self, obj):
        """Serialize, then post-process fields that depend on the request
        (API gates, outgoing link wrapping) or on the add-on type."""
        data = super(AddonSerializer, self).to_representation(obj)
        request = self.context.get('request', None)
        if 'theme_data' in data and data['theme_data'] is None:
            data.pop('theme_data')
        if ('request' in self.context and
                'wrap_outgoing_links' in self.context['request'].GET):
            for key in ('homepage', 'support_url', 'contributions_url'):
                if key in data:
                    data[key] = self.outgoingify(data[key])
        if obj.type == amo.ADDON_PERSONA:
            if 'weekly_downloads' in data:
                # weekly_downloads don't make sense for lightweight themes.
                data.pop('weekly_downloads')
            if ('average_daily_users' in data and
                    not self.is_broken_persona(obj)):
                # In addition, their average_daily_users number must come from
                # the popularity field of the attached Persona.
                data['average_daily_users'] = obj.persona.popularity
        if request and is_gate_active(request, 'del-addons-created-field'):
            data.pop('created', None)
        return data
    def outgoingify(self, data):
        """Wrap a URL (or a {lang: url} dict of URLs) with the outgoing
        link redirector."""
        if data:
            if isinstance(data, six.string_types):
                return get_outgoing_url(data)
            elif isinstance(data, dict):
                return {key: get_outgoing_url(value) if value else None
                        for key, value in data.items()}
        # None or empty string... don't bother.
        return data
    def get_categories(self, obj):
        """Return {app short name: [category slugs]}."""
        return {
            app_short_name: [cat.slug for cat in categories]
            for app_short_name, categories in obj.app_categories.items()
        }
    def get_has_eula(self, obj):
        # `has_eula` may have been annotated (e.g. by ES); otherwise fall
        # back to the actual eula field.
        return bool(getattr(obj, 'has_eula', obj.eula))
    def get_is_featured(self, obj):
        # obj._is_featured is set from ES, so will only be present for list
        # requests.
        if not hasattr(obj, '_is_featured'):
            # Any featuring will do.
            obj._is_featured = obj.is_featured(app=None, lang=None)
        return obj._is_featured
    def get_has_privacy_policy(self, obj):
        # Same annotation-or-field fallback as get_has_eula().
        return bool(getattr(obj, 'has_privacy_policy', obj.privacy_policy))
    def get_tags(self, obj):
        if not hasattr(obj, 'tag_list'):
            attach_tags([obj])
        # attach_tags() might not have attached anything to the addon, if it
        # had no tags.
        return getattr(obj, 'tag_list', [])
    def get_url(self, obj):
        # Use get_detail_url(), get_url_path() does an extra check on
        # current_version that is annoying in subclasses which don't want to
        # load that version.
        return absolutify(obj.get_detail_url())
    def get_edit_url(self, obj):
        """Absolute URL of the developer hub page for this add-on."""
        return absolutify(obj.get_dev_url())
    def get_ratings_url(self, obj):
        """Absolute URL of the add-on's ratings page."""
        return absolutify(obj.ratings_url)
    def get_review_url(self, obj):
        """Absolute URL of the reviewer tools page for this add-on."""
        return absolutify(reverse('reviewers.review', args=[obj.pk]))
    def get_icon_url(self, obj):
        # Broken personas get the default icon to avoid touching the missing
        # Persona instance.
        if self.is_broken_persona(obj):
            return absolutify(obj.get_default_icon_url(64))
        return absolutify(obj.get_icon_url(64))
    def get_icons(self, obj):
        """Return {icon size: absolute icon URL} for every supported size."""
        if self.is_broken_persona(obj):
            get_icon = obj.get_default_icon_url
        else:
            get_icon = obj.get_icon_url
        return {str(size): absolutify(get_icon(size))
                for size in amo.ADDON_ICON_SIZES}
    def get_ratings(self, obj):
        """Return the add-on's aggregate rating numbers."""
        return {
            'average': obj.average_rating,
            'bayesian_average': obj.bayesian_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
    def get_theme_data(self, obj):
        """Return the Persona theme data for lightweight themes, else None."""
        theme_data = None
        if obj.type == amo.ADDON_PERSONA and not self.is_broken_persona(obj):
            theme_data = obj.persona.theme_data
        return theme_data
    def is_broken_persona(self, obj):
        """Find out if the object is a Persona and either is missing its
        Persona instance or has a broken one.

        Call this everytime something in the serializer is suceptible to call
        something on the Persona instance, explicitly or not, to avoid 500
        errors and/or SQL queries in ESAddonSerializer."""
        try:
            # Setting obj.persona = None in ESAddonSerializer.fake_object()
            # below sadly isn't enough, so we work around it in that method by
            # creating a Persona instance with a custom '_broken'
            # attribute indicating that it should not be used.
            if obj.type == amo.ADDON_PERSONA and (
                    obj.persona is None or hasattr(obj.persona, '_broken')):
                raise Persona.DoesNotExist
        except Persona.DoesNotExist:
            # We got a DoesNotExist exception, therefore the Persona does not
            # exist or is broken.
            return True
        # Everything is fine, move on.
        return False
class AddonSerializerWithUnlistedData(AddonSerializer):
    """AddonSerializer that additionally exposes the latest unlisted
    version (for users allowed to see it)."""
    latest_unlisted_version = SimpleVersionSerializer()
    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('latest_unlisted_version',)
class SimpleAddonSerializer(AddonSerializer):
    """Minimal add-on representation: id, slug, name and icon only."""
    class Meta:
        model = Addon
        fields = ('id', 'slug', 'name', 'icon_url')
class ESAddonSerializer(BaseESSerializer, AddonSerializer):
    # Override various fields for related objects for which we don't want to
    # expose data the same way the regular serializer does (usually because
    # some of the data is not indexed in ES).
    authors = BaseUserSerializer(many=True, source='listed_authors')
    current_version = ESCurrentVersionSerializer()
    previews = ESPreviewSerializer(many=True, source='current_previews')
    _score = serializers.SerializerMethodField()
    datetime_fields = ('created', 'last_updated', 'modified')
    translated_fields = ('name', 'description', 'developer_comments',
                         'homepage', 'summary', 'support_email', 'support_url')
    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('_score', )
    def fake_preview_object(self, obj, data, model_class=Preview):
        # This is what ESPreviewSerializer.fake_object() would do, but we do
        # it here and make that fake_object() method a no-op in order to have
        # access to the right model_class to use - VersionPreview for static
        # themes, Preview for the rest.
        preview = model_class(id=data['id'], sizes=data.get('sizes', {}))
        preview.addon = obj
        preview.version = obj.current_version
        preview_serializer = self.fields['previews'].child
        # Attach base attributes that have the same name/format in ES and in
        # the model.
        preview_serializer._attach_fields(preview, data, ('modified',))
        # Attach translations.
        preview_serializer._attach_translations(
            preview, data, preview_serializer.translated_fields)
        return preview
    def fake_file_object(self, obj, data):
        """Build an unsaved File instance from its ES representation."""
        file_ = File(
            id=data['id'], created=self.handle_date(data['created']),
            hash=data['hash'], filename=data['filename'],
            is_webextension=data.get('is_webextension'),
            is_mozilla_signed_extension=data.get(
                'is_mozilla_signed_extension'),
            is_restart_required=data.get('is_restart_required', False),
            platform=data['platform'], size=data['size'],
            status=data['status'],
            strict_compatibility=data.get('strict_compatibility', False),
            version=obj)
        file_.webext_permissions_list = data.get('webext_permissions_list', [])
        return file_
    def fake_version_object(self, obj, data, channel):
        """Build an unsaved Version (with files, compat info and license)
        from its ES representation; returns None if there is no data."""
        if data:
            version = Version(
                addon=obj, id=data['id'],
                reviewed=self.handle_date(data['reviewed']),
                version=data['version'], channel=channel)
            version.all_files = [
                self.fake_file_object(version, file_data)
                for file_data in data.get('files', [])
            ]
            # In ES we store integers for the appversion info, we need to
            # convert it back to strings.
            compatible_apps = {}
            for app_id, compat_dict in data.get('compatible_apps', {}).items():
                app_name = APPS_ALL[int(app_id)]
                compatible_apps[app_name] = ApplicationsVersions(
                    min=AppVersion(version=compat_dict.get('min_human', '')),
                    max=AppVersion(version=compat_dict.get('max_human', '')))
            version._compatible_apps = compatible_apps
            version_serializer = self.fields['current_version']
            version_serializer._attach_translations(
                version, data, version_serializer.translated_fields)
            if 'license' in data:
                license_serializer = version_serializer.fields['license']
                version.license = License(id=data['license']['id'])
                license_serializer._attach_fields(
                    version.license, data['license'], ('builtin', 'url'))
                # Can't use license_serializer._attach_translations() directly
                # because 'name' is a SerializerMethodField, not an
                # ESTranslatedField.
                license_serializer.db_name.attach_translations(
                    version.license, data['license'], 'name')
            else:
                version.license = None
        else:
            version = None
        return version
    def fake_object(self, data):
        """Create a fake instance of Addon and related models from ES data."""
        obj = Addon(id=data['id'], slug=data['slug'])
        # Attach base attributes that have the same name/format in ES and in
        # the model.
        self._attach_fields(
            obj, data, (
                'average_daily_users',
                'bayesian_rating',
                'contributions',
                'created',
                'default_locale',
                'guid',
                'has_eula',
                'has_privacy_policy',
                'hotness',
                'icon_hash',
                'icon_type',
                'is_experimental',
                'is_recommended',
                'last_updated',
                'modified',
                'public_stats',
                'requires_payment',
                'slug',
                'status',
                'type',
                'view_source',
                'weekly_downloads'
            )
        )
        # Attach attributes that do not have the same name/format in ES.
        obj.tag_list = data.get('tags', [])
        obj.all_categories = [
            CATEGORIES_BY_ID[cat_id] for cat_id in data.get('category', [])]
        # Not entirely accurate, but enough in the context of the search API.
        obj.disabled_by_user = data.get('is_disabled', False)
        # Attach translations (they require special treatment).
        self._attach_translations(obj, data, self.translated_fields)
        # Attach related models (also faking them). `current_version` is a
        # property we can't write to, so we use the underlying field which
        # begins with an underscore.
        obj._current_version = self.fake_version_object(
            obj, data.get('current_version'), amo.RELEASE_CHANNEL_LISTED)
        data_authors = data.get('listed_authors', [])
        obj.listed_authors = [
            UserProfile(
                id=data_author['id'], display_name=data_author['name'],
                username=data_author['username'],
                is_public=data_author.get('is_public', False))
            for data_author in data_authors
        ]
        is_static_theme = data.get('type') == amo.ADDON_STATICTHEME
        preview_model_class = VersionPreview if is_static_theme else Preview
        obj.current_previews = [
            self.fake_preview_object(
                obj, preview_data, model_class=preview_model_class)
            for preview_data in data.get('previews', [])
        ]
        ratings = data.get('ratings', {})
        obj.average_rating = ratings.get('average')
        obj.total_ratings = ratings.get('count')
        obj.text_ratings_count = ratings.get('text_count')
        obj._is_featured = data.get('is_featured', False)
        if data['type'] == amo.ADDON_PERSONA:
            persona_data = data.get('persona')
            if persona_data:
                obj.persona = Persona(
                    addon=obj,
                    accentcolor=persona_data['accentcolor'],
                    display_username=persona_data['author'],
                    header=persona_data['header'],
                    footer=persona_data['footer'],
                    # "New" Persona do not have a persona_id, it's a relic from
                    # old ones.
                    persona_id=0 if persona_data['is_new'] else 42,
                    textcolor=persona_data['textcolor'],
                    popularity=data.get('average_daily_users'),
                )
            else:
                # Sadly, although we can set obj.persona = None, this does not
                # seem to prevent the query later on. So instead, work around
                # it by creating a Persona instance with a custom attribute
                # indicating that it should not be used.
                obj.persona = Persona()
                obj.persona._broken = True
        return obj
    def get__score(self, obj):
        # es_meta is added by BaseESSerializer.to_representation() before DRF's
        # to_representation() is called, so it's present on all objects.
        return obj._es_meta['score']
    def to_representation(self, obj):
        """Serialize, dropping `_score` for clients behind the gate."""
        data = super(ESAddonSerializer, self).to_representation(obj)
        request = self.context.get('request')
        if request and '_score' in data and not is_gate_active(
                request, 'addons-search-_score-field'):
            data.pop('_score')
        return data
class ESAddonAutoCompleteSerializer(ESAddonSerializer):
    """Tiny ES-backed representation used for autocomplete suggestions."""
    class Meta(ESAddonSerializer.Meta):
        fields = ('id', 'icon_url', 'is_recommended', 'name', 'type', 'url')
        model = Addon
    def get_url(self, obj):
        # Addon.get_url_path() wants current_version to exist, but that's just
        # a safeguard. We don't care and don't want to fetch the current
        # version field to improve perf, so give it a fake one.
        obj._current_version = Version()
        return absolutify(obj.get_url_path())
class StaticCategorySerializer(serializers.Serializer):
    """Serializes a `StaticCategory` as found in constants.categories"""
    id = serializers.IntegerField()
    name = serializers.CharField()
    slug = serializers.CharField()
    application = serializers.SerializerMethodField()
    misc = serializers.BooleanField()
    type = serializers.SerializerMethodField()
    weight = serializers.IntegerField()
    description = serializers.CharField()
    def get_application(self, obj):
        """Map the numeric application id to its short name."""
        return APPS_ALL[obj.application].short
    def get_type(self, obj):
        """Map the numeric add-on type to its API string."""
        return ADDON_TYPE_CHOICES_API[obj.type]
class LanguageToolsSerializer(AddonSerializer):
    """Serializes language tools (langpacks/dictionaries), optionally with
    the version compatible with the requested application version."""
    target_locale = serializers.CharField()
    current_compatible_version = serializers.SerializerMethodField()
    class Meta:
        model = Addon
        fields = ('id', 'current_compatible_version', 'default_locale', 'guid',
                  'name', 'slug', 'target_locale', 'type', 'url', )
    def get_current_compatible_version(self, obj):
        """Return the serialized compatible version attached by the view,
        or None when none was attached (or none exists)."""
        compatible_versions = getattr(obj, 'compatible_versions', None)
        if compatible_versions is not None:
            data = MinimalVersionSerializer(
                compatible_versions, many=True).data
            try:
                # 99% of the cases there will only be one result, since most
                # language packs are automatically uploaded for a given app
                # version. If there are more, pick the most recent one.
                # NOTE(review): data[0] being "the most recent" assumes the
                # view orders compatible_versions accordingly — confirm.
                return data[0]
            except IndexError:
                # This should not happen, because the queryset in the view is
                # supposed to filter results to only return add-ons that do
                # have at least one compatible version, but let's not fail
                # too loudly if the unthinkable happens...
                pass
        return None
    def to_representation(self, obj):
        """Serialize, dropping the compatible version when no app/appversion
        was requested, and shimming locale_disambiguation behind its gate."""
        data = super(LanguageToolsSerializer, self).to_representation(obj)
        request = self.context['request']
        if (AddonAppVersionQueryParam.query_param not in request.GET and
                'current_compatible_version' in data):
            data.pop('current_compatible_version')
        if request and is_gate_active(
                request, 'addons-locale_disambiguation-shim'):
            data['locale_disambiguation'] = None
        return data
class ReplacementAddonSerializer(serializers.ModelSerializer):
    """Serializes a ReplacementAddon, resolving its path into the guid(s)
    of the replacement add-on or collection members."""
    replacement = serializers.SerializerMethodField()
    # Patterns used to recognize add-on and collection paths; query-ish
    # characters are excluded from the capture groups.
    ADDON_PATH_REGEX = r"""/addon/(?P<addon_id>[^/<>"']+)/$"""
    COLLECTION_PATH_REGEX = (
        r"""/collections/(?P<user_id>[^/<>"']+)/(?P<coll_slug>[^/]+)/$""")
    class Meta:
        model = ReplacementAddon
        fields = ('guid', 'replacement')
    def _get_addon_guid(self, addon_id):
        """Return [guid] for a public add-on matched by id or slug, else []."""
        try:
            addon = Addon.objects.public().id_or_slug(addon_id).get()
        except Addon.DoesNotExist:
            return []
        return [addon.guid]
    def _get_collection_guids(self, user_id, collection_slug):
        """Return the guids of valid add-ons in the matched listed
        collection, or [] when the collection does not exist."""
        try:
            get_args = {'slug': collection_slug, 'listed': True}
            # user_id may be either a numeric id or a username.
            if isinstance(user_id, six.string_types) and not user_id.isdigit():
                get_args.update(**{'author__username': user_id})
            else:
                get_args.update(**{'author': user_id})
            collection = Collection.objects.get(**get_args)
        except Collection.DoesNotExist:
            return []
        valid_q = Addon.objects.get_queryset().valid_q([amo.STATUS_APPROVED])
        return list(
            collection.addons.filter(valid_q).values_list('guid', flat=True))
    def get_replacement(self, obj):
        """Resolve obj.path to a list of replacement guids (possibly [])."""
        if obj.has_external_url():
            # It's an external url so no guids.
            return []
        addon_match = re.search(self.ADDON_PATH_REGEX, obj.path)
        if addon_match:
            return self._get_addon_guid(addon_match.group('addon_id'))
        coll_match = re.search(self.COLLECTION_PATH_REGEX, obj.path)
        if coll_match:
            return self._get_collection_guids(
                coll_match.group('user_id'), coll_match.group('coll_slug'))
        return []
class CompatOverrideSerializer(serializers.ModelSerializer):
    """Serializes a CompatOverride together with its collapsed version
    ranges and the applications each range applies to."""
    class VersionRangeSerializer(serializers.Serializer):
        class ApplicationSerializer(serializers.Serializer):
            name = serializers.CharField(source='app.pretty')
            id = serializers.IntegerField(source='app.id')
            min_version = serializers.CharField(source='min')
            max_version = serializers.CharField(source='max')
            guid = serializers.CharField(source='app.guid')
        addon_min_version = serializers.CharField(source='min')
        addon_max_version = serializers.CharField(source='max')
        applications = ApplicationSerializer(source='apps', many=True)
    addon_id = serializers.IntegerField()
    addon_guid = serializers.CharField(source='guid')
    version_ranges = VersionRangeSerializer(
        source='collapsed_ranges', many=True)
    class Meta:
        model = CompatOverride
        fields = ('addon_id', 'addon_guid', 'name', 'version_ranges')
    def get_addon_id(self, obj):
        # NOTE(review): `addon_id` above is a plain IntegerField rather than
        # a SerializerMethodField, so this method appears to be unused —
        # confirm against callers/subclasses before removing.
        return obj.addon_id
|
|
# Parser, based on John Aycock's SPARK examples
from spark import GenericParser
from spark import GenericASTBuilder
from ast import AST
class GrammaticalError(Exception):
    """Raised when the token stream does not match the command grammar.

    Keeps the original message available as ``string`` for existing callers.
    """

    def __init__(self, string):
        # Initialize the Exception base class too: the original code skipped
        # this, leaving e.args empty and breaking repr()/pickling.
        super(GrammaticalError, self).__init__(string)
        self.string = string

    def __str__(self):
        return self.string
class CoreParser(GenericParser):
    def __init__(self, start):
        # Build the parser from the p_* docstring grammar rules, with
        # `start` as the grammar's start symbol.
        GenericParser.__init__(self, start)
    def typestring(self, token):
        # SPARK hook: map a token object to the terminal name used in the
        # grammar docstrings.
        return token.type
    def error(self, token):
        # SPARK hook: called when no grammar rule matches the current token.
        raise GrammaticalError(
            "Unexpected token `%s' (word number %d)" % (token, token.wordno))
def p_chained_commands(self, args):
'''
chained_commands ::= single_command
chained_commands ::= single_command chained_commands
'''
if(len(args) == 1):
return AST('chain', None, [ args[0] ])
else:
args[1].children.insert(0, args[0])
return args[1]
def p_single_command(self, args):
'''
single_command ::= letter
single_command ::= sky_letter
single_command ::= number_rule
single_command ::= movement
single_command ::= character
single_command ::= editing
single_command ::= modifiers
single_command ::= english
single_command ::= word_sentence
single_command ::= word_phrase
'''
return args[0]
def p_movement(self, args):
'''
movement ::= up repeat
movement ::= down repeat
movement ::= left repeat
movement ::= right repeat
'''
if args[1] != None:
return AST('repeat', [ args[1] ], [
AST('movement', [ args[0] ])
])
else:
return AST('movement', [ args[0] ])
def p_repeat(self, args):
'''
repeat ::=
repeat ::= number_set
'''
if len(args) > 0:
return args[0]
else:
return None
small_numbers = {
'zero' : 0,
'one' : 1,
'two' : 2,
'three' : 3,
'four' : 4,
'five' : 5,
'six' : 6,
'seven' : 7,
'eight' : 8,
'nine' : 9,
'ten' : 10,
'eleven' : 11,
'twelve' : 12,
'thirteen' : 13,
'fourteen' : 14,
'fifteen' : 15,
'sixteen' : 16,
'seventeen' : 17,
'eighteen' : 18,
'nineteen' : 19,
# sadly, kaldi often recognizes these by accident
'to' : 2,
'for' : 4,
}
def p_number_rule(self, args):
'''
number_rule ::= number number_set
number_rule ::= number thousand_number_set
number_rule ::= number million_number_set
number_rule ::= number billion_number_set
'''
return AST('sequence', [ str(args[1]) ])
def p_number_set(self, args):
'''
number_set ::= _firstnumbers
number_set ::= _tens
number_set ::= _tens _ones
number_set ::= _hundreds
number_set ::= _hundreds _firstnumbers
number_set ::= _hundreds _tens
number_set ::= _hundreds _tens _ones
'''
return sum(args)
def p__ones(self, args):
'''
_ones ::= one
_ones ::= two
_ones ::= three
_ones ::= four
_ones ::= five
_ones ::= six
_ones ::= seven
_ones ::= eight
_ones ::= nine
_ones ::= to
_ones ::= for
'''
return self.small_numbers[args[0].type]
def p__firstnumbers(self, args):
'''
_firstnumbers ::= zero
_firstnumbers ::= one
_firstnumbers ::= two
_firstnumbers ::= three
_firstnumbers ::= four
_firstnumbers ::= five
_firstnumbers ::= six
_firstnumbers ::= seven
_firstnumbers ::= eight
_firstnumbers ::= nine
_firstnumbers ::= ten
_firstnumbers ::= eleven
_firstnumbers ::= twelve
_firstnumbers ::= thirteen
_firstnumbers ::= fourteen
_firstnumbers ::= fifteen
_firstnumbers ::= sixteen
_firstnumbers ::= seventeen
_firstnumbers ::= eighteen
_firstnumbers ::= nineteen
_firstnumbers ::= to
_firstnumbers ::= for
'''
return self.small_numbers[args[0].type]
def p__tens(self, args):
'''
_tens ::= twenty
_tens ::= thirty
_tens ::= forty
_tens ::= fifty
_tens ::= sixty
_tens ::= seventy
_tens ::= eighty
_tens ::= ninety
'''
value = {
'twenty' : 20,
'thirty' : 30,
'forty' : 40,
'fifty' : 50,
'sixty' : 60,
'seventy' : 70,
'eighty' : 80,
'ninety' : 90
}
return value[args[0].type]
def p__hundreds(self, args):
'''
_hundreds ::= _ones hundred
'''
return args[0] * 100
def p_thousand_number_set(self, args):
'''
thousand_number_set ::= number_set thousand
thousand_number_set ::= number_set thousand number_set
'''
total = args[0] * 1000
if len(args) > 2: total += args[2]
return total
def p_million_number_set(self, args):
'''
million_number_set ::= number_set million
million_number_set ::= number_set million number_set
million_number_set ::= number_set million thousand_number_set
'''
total = args[0] * 1000000
if len(args) > 2: total += args[2]
return total
def p_billion_number_set(self, args):
'''
billion_number_set ::= number_set billion
billion_number_set ::= number_set billion number_set
billion_number_set ::= number_set billion thousand_number_set
billion_number_set ::= number_set billion million_number_set
'''
total = args[0] * 1000000000
if len(args) > 2: total += args[2]
return total
def p_sky_letter(self, args):
'''
sky_letter ::= sky letter
'''
ast = args[1]
ast.meta[0] = ast.meta[0].upper()
return ast
def p_letter(self, args):
'''
letter ::= arch
letter ::= bravo
letter ::= charlie
letter ::= delta
letter ::= eco
letter ::= echo
letter ::= fox
letter ::= golf
letter ::= hotel
letter ::= india
letter ::= julia
letter ::= kilo
letter ::= line
letter ::= mike
letter ::= november
letter ::= oscar
letter ::= papa
letter ::= queen
letter ::= romeo
letter ::= sierra
letter ::= tango
letter ::= uniform
letter ::= victor
letter ::= whiskey
letter ::= whisky
letter ::= xray
letter ::= expert
letter ::= yankee
letter ::= zulu
'''
if(args[0].type == 'expert'): args[0].type = 'x'
return AST('char', [ args[0].type[0] ])
def p_character(self, args):
'''
character ::= act
character ::= colon
character ::= semicolon
character ::= single quote
character ::= double quote
character ::= equal
character ::= space
character ::= tab
character ::= bang
character ::= hash
character ::= dollar
character ::= percent
character ::= carrot
character ::= ampersand
character ::= star
character ::= late
character ::= rate
character ::= minus
character ::= dash
character ::= underscore
character ::= plus
character ::= backslash
character ::= dot
character ::= dit
character ::= slash
character ::= question
character ::= comma
'''
value = {
'act' : 'Escape',
'colon' : 'colon',
'semicolon' : 'semicolon',
'single': 'apostrophe',
'double': 'quotedbl',
'equal' : 'equal',
'space' : 'space',
'tab' : 'Tab',
'bang' : 'exclam',
'hash' : 'numbersign',
'dollar': 'dollar',
'percent': 'percent',
'carrot': 'caret',
'ampersand': 'ampersand',
'star': 'asterisk',
'late': 'parenleft',
'rate': 'parenright',
'minus': 'minus',
'dash': 'minus',
'underscore': 'underscore',
'plus': 'plus',
'backslash': 'backslash',
'dot': 'period',
'dit': 'period',
'slash': 'slash',
'question': 'question',
'comma': 'comma'
}
return AST('raw_char', [ value[args[0].type] ])
def p_editing(self, args):
'''
editing ::= slap repeat
editing ::= scratch repeat
'''
value = {
'slap' : 'Return',
'scratch': 'BackSpace'
}
if args[1] != None:
return AST('repeat', [ args[1] ], [
AST('raw_char', [ value[args[0].type] ])
])
else:
return AST('raw_char', [ value[args[0].type] ])
def p_modifiers(self, args):
'''
modifiers ::= control single_command
modifiers ::= alt single_command
modifiers ::= alternative single_command
modifiers ::= super single_command
modifiers ::= shift single_command
'''
value = {
'control' : 'ctrl',
'alt' : 'alt',
'alternative' : 'alt',
'super' : 'Super_L',
'shift' : 'Shift_L'
}
if(args[1].type == 'mod_plus_key'):
args[1].meta.insert(0, value[args[0].type])
return args[1]
else:
return AST('mod_plus_key', [ value[args[0].type] ], [ args[1] ] )
def p_english(self, args):
'''
english ::= word ANY
'''
return AST('sequence', [ args[1].extra ])
def p_word_sentence(self, args):
'''
word_sentence ::= sentence word_repeat
'''
if(len(args[1].children) > 0):
args[1].children[0].meta = args[1].children[0].meta.capitalize()
return args[1]
def p_word_phrase(self, args):
'''
word_phrase ::= phrase word_repeat
'''
return args[1]
def p_word_repeat(self, args):
'''
word_repeat ::= raw_word
word_repeat ::= raw_word word_repeat
'''
if(len(args) == 1):
return AST('word_sequence', None,
[ AST('null', args[0]) ])
else:
args[1].children.insert(0, AST('null', args[0]))
return args[1]
def p_raw_word(self, args):
'''
raw_word ::= ANY
raw_word ::= zero
raw_word ::= one
raw_word ::= two
raw_word ::= three
raw_word ::= four
raw_word ::= five
raw_word ::= six
raw_word ::= seven
raw_word ::= eight
raw_word ::= nine
raw_word ::= to
raw_word ::= for
'''
if(args[0].type == 'ANY'):
return args[0].extra
return args[0].type
class SingleInputParser(CoreParser):
    """Parser for a single utterance: sleep/wake control commands or a
    chain of commands, terminated by the END token.

    NOTE: the docstrings of the p_* methods are grammar productions read
    by spark, not documentation.
    """

    def __init__(self):
        # if you have the issue that commands fail because spurious
        # tokens ('i', 'the',...) are prepended to the actual command,
        # try commenting the 'single_input' line, and uncommenting
        # the 'single_input_discard_junk' line.
        CoreParser.__init__(self, 'single_input')
        #CoreParser.__init__(self, 'single_input_discard_junk')
        # While True, parsed commands are discarded (see p_single_input).
        self.sleeping = False

    def p_sleep_commands(self, args):
        '''
        sleep_commands ::= go to sleep
        sleep_commands ::= start listening
        '''
        # The last token distinguishes the two rules.
        # Use print() calls (not Python-2 print statements) so this file
        # also runs under Python 3; for a single argument the behavior
        # is identical on Python 2.
        if args[-1].type == 'sleep':
            self.sleeping = True
            print('Going to sleep.')
        else:
            self.sleeping = False
            print('Waking from sleep')
        return AST('')

    def p_single_input(self, args):
        '''
        single_input ::= END
        single_input ::= sleep_commands END
        single_input ::= chained_commands END
        '''
        # While sleeping, swallow the command and return an empty AST.
        if len(args) > 0 and not self.sleeping:
            return args[0]
        else:
            return AST('')

    def p_single_input_discard_junk(self, args):
        '''
        single_input_discard_junk ::= END
        single_input_discard_junk ::= junk_tokens sleep_commands END
        single_input_discard_junk ::= junk_tokens chained_commands END
        '''
        # args[0] is the junk_tokens result; the real command is args[1].
        if len(args) > 1 and not self.sleeping:
            return args[1]
        else:
            return AST('')

    # With some models, Kaldi may return spurious tokens in response
    # to noise. If that happens just before we say a command, it will
    # make the command fail. This "dummy" rule swallows these tokens.
    def p_junk_tokens(self, args):
        '''
        junk_tokens ::=
        junk_tokens ::= i junk_tokens
        junk_tokens ::= the junk_tokens
        junk_tokens ::= a junk_tokens
        junk_tokens ::= and junk_tokens
        '''
        return AST('')
def parse(parser, tokens):
    """Convenience wrapper: delegate parsing of *tokens* to *parser*."""
    result = parser.parse(tokens)
    return result
|
|
import base64
import datetime
import logging
import urllib
from functools import wraps
from io import BytesIO
from typing import Callable, Dict, Optional, Sequence, Tuple, TypeVar, Union, cast, overload
import django_otp
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth import login as django_login
from django.contrib.auth.decorators import user_passes_test as django_user_passes_test
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.views import redirect_to_login
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, QueryDict
from django.http.multipartparser import MultiPartParser
from django.shortcuts import resolve_url
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.views.decorators.csrf import csrf_exempt
from django_otp import user_has_device
from two_factor.utils import default_device
from zerver.lib.exceptions import (
AccessDeniedError,
ErrorCode,
InvalidAPIKeyError,
InvalidAPIKeyFormatError,
InvalidJSONError,
JsonableError,
OrganizationAdministratorRequired,
OrganizationMemberRequired,
OrganizationOwnerRequired,
RateLimited,
RealmDeactivatedError,
UnsupportedWebhookEventType,
UserDeactivatedError,
)
from zerver.lib.queue import queue_json_publish
from zerver.lib.rate_limiter import RateLimitedIPAddr, RateLimitedUser
from zerver.lib.request import REQ, get_request_notes, has_request_variables
from zerver.lib.response import json_method_not_allowed, json_success, json_unauthorized
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.types import ViewFuncT
from zerver.lib.utils import has_api_key_format, statsd
from zerver.models import Realm, UserProfile, get_client, get_user_profile_by_api_key
if settings.ZILENCER_ENABLED:
from zilencer.models import (
RateLimitedRemoteZulipServer,
RemoteZulipServer,
get_remote_server_by_uuid,
)
# Module-level loggers for rate limiting and incoming-webhook errors.
rate_limiter_logger = logging.getLogger("zerver.lib.rate_limiter")
webhook_logger = logging.getLogger("zulip.zerver.webhooks")
# Separate logger so unsupported-webhook-event noise can be filtered or
# routed independently of real webhook errors.
webhook_unsupported_events_logger = logging.getLogger("zulip.zerver.webhooks.unsupported")
FuncT = TypeVar("FuncT", bound=Callable[..., object])


def cachify(method: FuncT) -> FuncT:
    """Memoize *method* on its positional arguments, forever.

    The cache is unbounded and keyed on the tuple of positional
    arguments (which must therefore be hashable); keyword arguments are
    not supported.  Uses @wraps so the wrapper keeps the wrapped
    function's name/docstring (the original implementation lost them).
    """
    dct: Dict[Tuple[object, ...], object] = {}

    @wraps(method)
    def cache_wrapper(*args: object) -> object:
        tup = tuple(args)
        # EAFP: single dict lookup on the hit path instead of two.
        try:
            return dct[tup]
        except KeyError:
            result = method(*args)
            dct[tup] = result
            return result

    return cast(FuncT, cache_wrapper)  # https://github.com/python/mypy/issues/1927
def update_user_activity(
    request: HttpRequest, user_profile: UserProfile, query: Optional[str]
) -> None:
    """Enqueue a user_activity event describing this request.

    Presence requests are skipped: update_active_status also pushes to
    RabbitMQ, and it seems redundant to log that here as well.
    """
    if request.META["PATH_INFO"] == "/json/users/me/presence":
        return

    request_notes = get_request_notes(request)
    if query is None:
        # Fall back to the query name recorded on the request notes,
        # then to the raw request path.
        query = (
            request_notes.query
            if request_notes.query is not None
            else request.META["PATH_INFO"]
        )

    assert request_notes.client is not None
    event = {
        "query": query,
        "user_profile_id": user_profile.id,
        "time": datetime_to_timestamp(timezone_now()),
        "client_id": request_notes.client.id,
    }
    queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func: ViewFuncT) -> ViewFuncT:
    """Reject non-POST requests with a 405 (JSON or HTML, per the
    request's error format)."""

    @wraps(func)
    def wrapper(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        if request.method == "POST":
            return func(request, *args, **kwargs)

        logging.warning(
            "Method Not Allowed (%s): %s",
            request.method,
            request.path,
            extra={"status_code": 405, "request": request},
        )
        if get_request_notes(request).error_format == "JSON":
            return json_method_not_allowed(["POST"])
        return TemplateResponse(
            request, "404.html", context={"status_code": 405}, status=405
        )

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_realm_owner(func: ViewFuncT) -> ViewFuncT:
    """Allow the view only for realm (organization) owners."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.is_realm_owner:
            return func(request, user_profile, *args, **kwargs)
        raise OrganizationOwnerRequired()

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_realm_admin(func: ViewFuncT) -> ViewFuncT:
    """Allow the view only for realm (organization) administrators."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.is_realm_admin:
            return func(request, user_profile, *args, **kwargs)
        raise OrganizationAdministratorRequired()

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_organization_member(func: ViewFuncT) -> ViewFuncT:
    """Reject users whose role value is greater than ROLE_MEMBER."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.role <= UserProfile.ROLE_MEMBER:
            return func(request, user_profile, *args, **kwargs)
        raise OrganizationMemberRequired()

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_billing_access(func: ViewFuncT) -> ViewFuncT:
    """Allow the view only for users with billing access."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.has_billing_access:
            return func(request, user_profile, *args, **kwargs)
        raise JsonableError(_("Must be a billing administrator or an organization owner"))

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def process_client(
    request: HttpRequest,
    user: Union[UserProfile, AnonymousUser],
    *,
    is_browser_view: bool = False,
    client_name: Optional[str] = None,
    skip_update_user_activity: bool = False,
    query: Optional[str] = None,
) -> None:
    """Resolve and attach the Client object to the request's notes and,
    for authenticated users, record the request via update_user_activity.

    client_name defaults to the name already recorded on the request
    notes; browser views are normalized to the "website" client unless
    the client identifies itself as a Zulip app.
    """
    request_notes = get_request_notes(request)
    if client_name is None:
        client_name = request_notes.client_name

    assert client_name is not None

    # We could check for a browser's name being "Mozilla", but
    # e.g. Opera and MobileSafari don't set that, and it seems
    # more robust to just key off whether it was a browser view
    if is_browser_view and not client_name.startswith("Zulip"):
        # Avoid changing the client string for browsers, but let
        # the Zulip desktop apps be themselves.
        client_name = "website"

    request_notes.client = get_client(client_name)
    if not skip_update_user_activity and user.is_authenticated:
        update_user_activity(request, user, query)
class InvalidZulipServerError(JsonableError):
    """Raised when the remote Zulip server identified by `role` is not
    registered with this server."""

    code = ErrorCode.INVALID_ZULIP_SERVER
    data_fields = ["role"]

    def __init__(self, role: str) -> None:
        # The UUID (or identifier) the remote server authenticated with.
        self.role: str = role

    @staticmethod
    def msg_format() -> str:
        return "Zulip server auth failure: {role} is not registered"
class InvalidZulipServerKeyError(InvalidZulipServerError):
    """Raised when the remote server exists but presented the wrong API key."""

    @staticmethod
    def msg_format() -> str:
        return "Zulip server auth failure: key does not match role {role}"
def validate_api_key(
    request: HttpRequest,
    role: Optional[str],
    api_key: str,
    allow_webhook_access: bool = False,
    client_name: Optional[str] = None,
) -> Union[UserProfile, "RemoteZulipServer"]:
    """Authenticate an API request and return the authenticated entity.

    `role` is the user's email, a remote server's UUID, or None (in
    which case the api_key alone identifies the user).  Sets
    request.user and records the client.  Raises JsonableError
    subclasses on any authentication failure.
    """
    # Remove whitespace to protect users from trivial errors.
    api_key = api_key.strip()
    if role is not None:
        role = role.strip()

    # If `role` doesn't look like an email, it might be a uuid.
    if settings.ZILENCER_ENABLED and role is not None and "@" not in role:
        try:
            remote_server = get_remote_server_by_uuid(role)
        except RemoteZulipServer.DoesNotExist:
            raise InvalidZulipServerError(role)
        if api_key != remote_server.api_key:
            raise InvalidZulipServerKeyError(role)

        # Remote servers may only authenticate against the root domain
        # (the push notifications bouncer), never a realm subdomain.
        if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            raise JsonableError(_("Invalid subdomain for push notifications bouncer"))
        request.user = remote_server
        # NOTE(review): this appears to initialize an attribute the rate
        # limiter expects on the requesting entity -- confirm.
        remote_server.rate_limits = ""
        # Skip updating UserActivity, since remote_server isn't actually a UserProfile object.
        process_client(request, remote_server, skip_update_user_activity=True)
        return remote_server

    user_profile = access_user_by_api_key(request, api_key, email=role)
    if user_profile.is_incoming_webhook and not allow_webhook_access:
        raise JsonableError(_("This API is not available to incoming webhook bots."))

    request.user = user_profile
    process_client(request, user_profile, client_name=client_name)

    return user_profile
def validate_account_and_subdomain(request: HttpRequest, user_profile: UserProfile) -> None:
    """Raise if the user's realm or account is deactivated, or if the
    request's subdomain does not match the user's realm."""
    if user_profile.realm.deactivated:
        raise RealmDeactivatedError()
    if not user_profile.is_active:
        raise UserDeactivatedError()

    # Either the subdomain matches, or we're accessing Tornado from
    # and to localhost (aka spoofing a request as the user).
    if not user_matches_subdomain(get_subdomain(request), user_profile) and not (
        settings.RUNNING_INSIDE_TORNADO
        and request.META["SERVER_NAME"] == "127.0.0.1"
        and request.META["REMOTE_ADDR"] == "127.0.0.1"
    ):
        logging.warning(
            "User %s (%s) attempted to access API on wrong subdomain (%s)",
            user_profile.delivery_email,
            user_profile.realm.subdomain,
            get_subdomain(request),
        )
        raise JsonableError(_("Account is not associated with this subdomain"))
def access_user_by_api_key(
    request: HttpRequest, api_key: str, email: Optional[str] = None
) -> UserProfile:
    """Look up the UserProfile for api_key, optionally cross-checking the
    claimed email, and validate the account/subdomain.

    Raises InvalidAPIKeyFormatError / InvalidAPIKeyError on failure.
    """
    if not has_api_key_format(api_key):
        raise InvalidAPIKeyFormatError()

    try:
        user_profile = get_user_profile_by_api_key(api_key)
    except UserProfile.DoesNotExist:
        raise InvalidAPIKeyError()
    if email is not None and email.lower() != user_profile.delivery_email.lower():
        # This covers the case that the API key is correct, but for a
        # different user.  We may end up wanting to relaxing this
        # constraint or give a different error message in the future.
        raise InvalidAPIKeyError()

    validate_account_and_subdomain(request, user_profile)

    return user_profile
def log_exception_to_webhook_logger(
    summary: str,
    unsupported_event: bool,
) -> None:
    """Log the current exception (with stack) to the webhook logger,
    using the dedicated unsupported-events logger when appropriate."""
    logger = webhook_unsupported_events_logger if unsupported_event else webhook_logger
    logger.exception(summary, stack_info=True)
def full_webhook_client_name(raw_client_name: Optional[str] = None) -> Optional[str]:
    """Wrap a base webhook name as "Zulip<name>Webhook"; None stays None."""
    if raw_client_name is not None:
        return f"Zulip{raw_client_name}Webhook"
    return None
# Use this for webhook views that don't get an email passed in.
def webhook_view(
    webhook_client_name: str,
    notify_bot_owner_on_invalid_json: bool = True,
    all_event_types: Optional[Sequence[str]] = None,
) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]:
    """Decorator factory for incoming-webhook endpoints authenticated by
    an `api_key` request variable.

    Unexpected exceptions (and UnsupportedWebhookEventType) are logged
    to the webhook loggers before being re-raised; InvalidJSONError can
    additionally notify the bot's owner.
    """
    # Unfortunately, callback protocols are insufficient for this:
    # https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
    # Variadic generics are necessary: https://github.com/python/typing/issues/193
    def _wrapped_view_func(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, api_key: str = REQ(), *args: object, **kwargs: object
        ) -> HttpResponse:
            # allow_webhook_access=True: incoming webhook bots may use this view.
            user_profile = validate_api_key(
                request,
                None,
                api_key,
                allow_webhook_access=True,
                client_name=full_webhook_client_name(webhook_client_name),
            )

            if settings.RATE_LIMITING:
                # rate_limit_user is defined elsewhere in this module (not shown here).
                rate_limit_user(request, user_profile, domain="api_by_user")
            try:
                return view_func(request, user_profile, *args, **kwargs)
            except Exception as err:
                if isinstance(err, InvalidJSONError) and notify_bot_owner_on_invalid_json:
                    # NOTE: importing this at the top of file leads to a
                    # cyclic import; correct fix is probably to move
                    # notify_bot_owner_about_invalid_json to a smaller file.
                    from zerver.lib.webhooks.common import notify_bot_owner_about_invalid_json

                    notify_bot_owner_about_invalid_json(user_profile, webhook_client_name)
                elif isinstance(err, JsonableError) and not isinstance(
                    err, UnsupportedWebhookEventType
                ):
                    # Expected client errors: re-raised without logging here.
                    pass
                else:
                    if isinstance(err, UnsupportedWebhookEventType):
                        err.webhook_name = webhook_client_name
                    log_exception_to_webhook_logger(
                        summary=str(err),
                        unsupported_event=isinstance(err, UnsupportedWebhookEventType),
                    )
                raise err

        # Record the declared event types on the wrapper (read elsewhere,
        # presumably for documentation/tests -- confirm).
        _wrapped_func_arguments._all_event_types = all_event_types
        return _wrapped_func_arguments

    return _wrapped_view_func
# From Django 2.2, modified to pass the request rather than just the
# user into test_func; this is useful so that we can revalidate the
# subdomain matches the user's realm. It is likely that we could make
# the subdomain validation happen elsewhere and switch to using the
# stock Django version.
def user_passes_test(
    test_func: Callable[[HttpRequest], bool],
    login_url: Optional[str] = None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
) -> Callable[[ViewFuncT], ViewFuncT]:
    """
    Decorator for views that checks that the user passes the given test,
    redirecting to the log-in page if necessary.  The test should be a
    callable that takes the HttpRequest object and returns True if the
    user passes.
    """

    def decorator(view_func: ViewFuncT) -> ViewFuncT:
        @wraps(view_func)
        def _wrapped_view(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
            if test_func(request):
                return view_func(request, *args, **kwargs)
            path = request.build_absolute_uri()
            resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
            # If the login URL is the same scheme and net location then just
            # use the path as the "next" url.
            login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
            if (not login_scheme or login_scheme == current_scheme) and (
                not login_netloc or login_netloc == current_netloc
            ):
                path = request.get_full_path()

            # TODO: Restore testing for this case; it was removed when
            # we enabled web-public stream testing on /.
            if path == "/":  # nocoverage
                # Don't add ?next=/, to keep our URLs clean
                return HttpResponseRedirect(resolved_login_url)
            return redirect_to_login(path, resolved_login_url, redirect_field_name)

        return cast(ViewFuncT, _wrapped_view)  # https://github.com/python/mypy/issues/1927

    return decorator
def logged_in_and_active(request: HttpRequest) -> bool:
    """True when the session user is authenticated, active, in an active
    realm, and on the realm's subdomain."""
    user = request.user
    if not user.is_authenticated or not user.is_active:
        return False
    if user.realm.deactivated:
        return False
    return user_matches_subdomain(get_subdomain(request), user)
def do_two_factor_login(request: HttpRequest, user_profile: UserProfile) -> None:
    """Mark the session as OTP-verified using the user's default 2FA
    device, if one exists (no-op otherwise)."""
    device = default_device(user_profile)
    if device:
        django_otp.login(request, device)
def do_login(request: HttpRequest, user_profile: UserProfile) -> None:
    """Creates a session, logging in the user, using the Django method,
    and also adds helpful data needed by our server logs.
    """
    django_login(request, user_profile)
    # Record who this session belongs to for request logging.
    get_request_notes(request).requestor_for_logs = user_profile.format_requestor_for_logs()
    process_client(request, user_profile, is_browser_view=True)
    if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
        # Log in with two factor authentication as well.
        do_two_factor_login(request, user_profile)
def log_view_func(view_func: ViewFuncT) -> ViewFuncT:
    """Record the view function's name as the request's query, for logging."""

    @wraps(view_func)
    def _wrapped(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        notes = get_request_notes(request)
        notes.query = view_func.__name__
        return view_func(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped)  # https://github.com/python/mypy/issues/1927
def add_logging_data(view_func: ViewFuncT) -> ViewFuncT:
    """Record client info for the request, then invoke the view through
    the rate limiter."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        process_client(request, request.user, is_browser_view=True, query=view_func.__name__)
        # rate_limit is defined elsewhere in this module (not shown here).
        return rate_limit()(view_func)(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def human_users_only(view_func: ViewFuncT) -> ViewFuncT:
    """Reject requests made by bot users; requires an authenticated user."""

    @wraps(view_func)
    def _wrapped(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        assert request.user.is_authenticated
        if not request.user.is_bot:
            return view_func(request, *args, **kwargs)
        raise JsonableError(_("This endpoint does not accept bot requests."))

    return cast(ViewFuncT, _wrapped)  # https://github.com/python/mypy/issues/1927
@overload
def zulip_login_required(
    function: ViewFuncT,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> ViewFuncT:
    ...


@overload
def zulip_login_required(
    function: None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Callable[[ViewFuncT], ViewFuncT]:
    ...


# Based on Django 1.8's @login_required
def zulip_login_required(
    function: Optional[ViewFuncT] = None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Union[Callable[[ViewFuncT], ViewFuncT], ViewFuncT]:
    """Like Django's @login_required, but validates the account/realm/
    subdomain via logged_in_and_active and also enforces 2FA.

    (Previously a named lambda that shadowed the `function` parameter;
    rewritten as a def for clarity, with identical behavior.)
    """

    def actual_decorator(func: ViewFuncT) -> ViewFuncT:
        # Outermost: login check; then 2FA; innermost: logging data.
        return user_passes_test(
            logged_in_and_active,
            login_url=login_url,
            redirect_field_name=redirect_field_name,
        )(
            zulip_otp_required(
                redirect_field_name=redirect_field_name,
                login_url=login_url,
            )(add_logging_data(func))
        )

    if function:
        return actual_decorator(function)
    return actual_decorator  # nocoverage # We don't use this without a function
def web_public_view(
    view_func: ViewFuncT,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Union[Callable[[ViewFuncT], ViewFuncT], ViewFuncT]:
    """
    This wrapper adds client info for unauthenticated users but
    forces authenticated users to go through 2fa.

    NOTE: This function == zulip_login_required in a production environment as
    web_public_view path has only been enabled for development purposes
    currently.
    """
    if not settings.DEVELOPMENT:
        # Coverage disabled because DEVELOPMENT is always true in development.
        return zulip_login_required(view_func, redirect_field_name, login_url)  # nocoverage

    # Previously a named lambda whose parameter shadowed `view_func`;
    # rewritten as a def with a distinct name, identical behavior.
    def actual_decorator(func: ViewFuncT) -> ViewFuncT:
        return zulip_otp_required(
            redirect_field_name=redirect_field_name, login_url=login_url
        )(add_logging_data(func))

    return actual_decorator(view_func)
def require_server_admin(view_func: ViewFuncT) -> ViewFuncT:
    """Allow only logged-in staff users; others are redirected home."""

    @zulip_login_required
    @wraps(view_func)
    def _wrapped(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        if request.user.is_staff:
            return add_logging_data(view_func)(request, *args, **kwargs)
        return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)

    return cast(ViewFuncT, _wrapped)  # https://github.com/python/mypy/issues/1927
def require_server_admin_api(view_func: ViewFuncT) -> ViewFuncT:
    """API variant of require_server_admin: raise a JSON error (rather
    than redirecting) when the user is not staff."""

    @zulip_login_required
    @wraps(view_func)
    def _wrapped_view_func(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if not user_profile.is_staff:
            # Fixed grammar in the error message ("an server" -> "a server").
            raise JsonableError(_("Must be a server administrator"))
        return view_func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def require_non_guest_user(view_func: ViewFuncT) -> ViewFuncT:
    """Reject guest users."""

    @wraps(view_func)
    def _wrapped(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if not user_profile.is_guest:
            return view_func(request, user_profile, *args, **kwargs)
        raise JsonableError(_("Not allowed for guest users"))

    return cast(ViewFuncT, _wrapped)  # https://github.com/python/mypy/issues/1927
def require_member_or_admin(view_func: ViewFuncT) -> ViewFuncT:
    """Reject guest users and bots; everyone else passes through."""

    @wraps(view_func)
    def _wrapped(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        # Guest check first, matching require_non_guest_user's error.
        if user_profile.is_guest:
            raise JsonableError(_("Not allowed for guest users"))
        if user_profile.is_bot:
            raise JsonableError(_("This endpoint does not accept bot requests."))
        return view_func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, _wrapped)  # https://github.com/python/mypy/issues/1927
def require_user_group_edit_permission(view_func: ViewFuncT) -> ViewFuncT:
    """On top of member-or-admin, require user-group edit permission
    (user_profile.can_edit_user_groups())."""

    @require_member_or_admin
    @wraps(view_func)
    def _wrapped(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.can_edit_user_groups():
            return view_func(request, user_profile, *args, **kwargs)
        raise JsonableError(_("Insufficient permission"))

    return cast(ViewFuncT, _wrapped)  # https://github.com/python/mypy/issues/1927
# This API endpoint is used only for the mobile apps. It is part of a
# workaround for the fact that React Native doesn't support setting
# HTTP basic authentication headers.
def authenticated_uploads_api_view(
    skip_rate_limiting: bool = False,
) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]:
    """Decorator factory: authenticate uploads requests via an `api_key`
    request variable instead of HTTP basic auth."""

    def _wrapped_view_func(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, api_key: str = REQ(), *args: object, **kwargs: object
        ) -> HttpResponse:
            # Positional False == allow_webhook_access=False: webhook
            # bots may not use this endpoint.
            user_profile = validate_api_key(request, None, api_key, False)
            if not skip_rate_limiting:
                # rate_limit is defined elsewhere in this module (not shown here).
                limited_func = rate_limit()(view_func)
            else:
                limited_func = view_func
            return limited_func(request, user_profile, *args, **kwargs)

        return _wrapped_func_arguments

    return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP basic
# authentication.
#
# If webhook_client_name is specific, the request is a webhook view
# with that string as the basis for the client string.
def authenticated_rest_api_view(
    *,
    webhook_client_name: Optional[str] = None,
    allow_webhook_access: bool = False,
    skip_rate_limiting: bool = False,
) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]:
    """Decorator factory: authenticate via HTTP basic auth (role:api_key)
    and invoke view_func(request, profile, ...).

    When webhook_client_name is given, the view is treated as a webhook:
    webhook access is implied and unexpected errors are logged to the
    webhook loggers before being re-raised.
    """
    if webhook_client_name is not None:
        allow_webhook_access = True

    def _wrapped_view_func(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, *args: object, **kwargs: object
        ) -> HttpResponse:
            # First try block attempts to get the credentials we need to do authentication
            try:
                # Grab the base64-encoded authentication string, decode it, and split it into
                # the email and API key
                auth_type, credentials = request.META["HTTP_AUTHORIZATION"].split()
                # case insensitive per RFC 1945
                if auth_type.lower() != "basic":
                    raise JsonableError(_("This endpoint requires HTTP basic authentication."))
                # NOTE(review): split(":") rejects secrets containing a
                # colon (extra parts -> ValueError -> 401); appears to be
                # acceptable for Zulip API keys -- confirm.
                role, api_key = base64.b64decode(credentials).decode().split(":")
            except ValueError:
                return json_unauthorized(_("Invalid authorization header for basic auth"))
            except KeyError:
                return json_unauthorized(_("Missing authorization header for basic auth"))

            # Now we try to do authentication or die
            try:
                # profile is a Union[UserProfile, RemoteZulipServer]
                profile = validate_api_key(
                    request,
                    role,
                    api_key,
                    allow_webhook_access=allow_webhook_access,
                    client_name=full_webhook_client_name(webhook_client_name),
                )
            except JsonableError as e:
                return json_unauthorized(e.msg)
            try:
                if not skip_rate_limiting:
                    # Apply rate limiting
                    # (rate_limit is defined elsewhere in this module.)
                    target_view_func = rate_limit()(view_func)
                else:
                    target_view_func = view_func
                return target_view_func(request, profile, *args, **kwargs)
            except Exception as err:
                if not webhook_client_name:
                    raise err
                if isinstance(err, JsonableError) and not isinstance(
                    err, UnsupportedWebhookEventType
                ):  # nocoverage
                    raise err

                if isinstance(err, UnsupportedWebhookEventType):
                    err.webhook_name = webhook_client_name
                log_exception_to_webhook_logger(
                    summary=str(err),
                    unsupported_event=isinstance(err, UnsupportedWebhookEventType),
                )
                raise err

        return _wrapped_func_arguments

    return _wrapped_view_func
def process_as_post(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator that populates request.POST for non-POST request bodies.

    Django only parses the request body into ``request.POST`` for actual
    POST requests; this wrapper performs the same parsing for PATCH/PUT so
    wrapped views can treat all write methods uniformly.
    """

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        # Adapted from django/http/__init__.py.
        # So by default Django doesn't populate request.POST for anything besides
        # POST requests. We want this dict populated for PATCH/PUT, so we have to
        # do it ourselves.
        #
        # This will not be required in the future, a bug will be filed against
        # Django upstream.
        if not request.POST:
            # Only take action if POST is empty.
            if request.META.get("CONTENT_TYPE", "").startswith("multipart"):
                # Note that request._files is just the private attribute that backs the
                # FILES property, so we are essentially setting request.FILES here. (In
                # Django 1.5 FILES was still a read-only property.)
                request.POST, request._files = MultiPartParser(
                    request.META,
                    BytesIO(request.body),
                    request.upload_handlers,
                    request.encoding,
                ).parse()
            else:
                # Non-multipart bodies are treated as urlencoded form data.
                request.POST = QueryDict(request.body, encoding=request.encoding)

        return view_func(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def authenticate_log_and_execute_json(
    request: HttpRequest,
    view_func: ViewFuncT,
    *args: object,
    skip_rate_limiting: bool = False,
    allow_unauthenticated: bool = False,
    **kwargs: object,
) -> HttpResponse:
    """Authenticate the session user, record the client, and run the view.

    Applies rate limiting (unless ``skip_rate_limiting``), rejects
    unauthenticated users (unless ``allow_unauthenticated``) and incoming
    webhook bots, and logs the client/query before delegating to
    ``view_func``.
    """
    if not skip_rate_limiting:
        limited_view_func = rate_limit()(view_func)
    else:
        limited_view_func = view_func

    if not request.user.is_authenticated:
        if not allow_unauthenticated:
            return json_unauthorized()

        # Unauthenticated access is allowed here (e.g. spectators); still
        # record the client for logging but skip user-activity updates.
        process_client(
            request,
            request.user,
            is_browser_view=True,
            skip_update_user_activity=True,
            query=view_func.__name__,
        )
        return limited_view_func(request, request.user, *args, **kwargs)

    user_profile = request.user
    validate_account_and_subdomain(request, user_profile)

    if user_profile.is_incoming_webhook:
        raise JsonableError(_("Webhook bots can only access webhooks"))

    process_client(request, user_profile, is_browser_view=True, query=view_func.__name__)
    return limited_view_func(request, user_profile, *args, **kwargs)
# Checks if the user is logged in. If not, return an error (the
# @login_required behavior of redirecting to a login page doesn't make
# sense for json views)
def authenticated_json_view(
    view_func: Callable[..., HttpResponse],
    skip_rate_limiting: bool = False,
    allow_unauthenticated: bool = False,
) -> Callable[..., HttpResponse]:
    """Wrap a JSON view with authentication, client logging, and rate limiting."""

    @wraps(view_func)
    def _json_view_wrapper(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        # All of the real work happens in the shared helper; this closure
        # only captures the configuration flags.
        return authenticate_log_and_execute_json(
            request,
            view_func,
            *args,
            skip_rate_limiting=skip_rate_limiting,
            allow_unauthenticated=allow_unauthenticated,
            **kwargs,
        )

    return _json_view_wrapper
def is_local_addr(addr: str) -> bool:
    """Return True when *addr* is the IPv4 or IPv6 loopback address."""
    return addr == "127.0.0.1" or addr == "::1"
# These views are used by the main Django server to notify the Tornado server
# of events. We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request: HttpRequest) -> bool:
    """Return True iff the request is local and carries the shared secret."""
    # Check the origin first so we never touch request.POST (which may
    # parse the body) for non-local requests, matching the original
    # short-circuit behavior.
    if not is_local_addr(request.META["REMOTE_ADDR"]):
        return False
    return request.POST.get("secret") == settings.SHARED_SECRET
def client_is_exempt_from_rate_limiting(request: HttpRequest) -> bool:
    """Return True for internal clients that should bypass rate limits."""
    # Don't rate limit requests from Django that come from our own servers,
    # and don't rate-limit dev instances
    client = get_request_notes(request).client
    # Exemption requires BOTH the "internal" client name AND either a local
    # origin or the DEBUG_RATE_LIMITING development setting.
    return (client is not None and client.name.lower() == "internal") and (
        is_local_addr(request.META["REMOTE_ADDR"]) or settings.DEBUG_RATE_LIMITING
    )
def internal_notify_view(
    is_tornado_view: bool,
) -> Callable[[ViewFuncT], Callable[..., HttpResponse]]:
    # The typing here could be improved by using the extended Callable types:
    # https://mypy.readthedocs.io/en/stable/additional_features.html#extended-callable-types
    """Used for situations where something running on the Zulip server
    needs to make a request to the (other) Django/Tornado processes running on
    the server."""

    def _wrapped_view_func(view_func: ViewFuncT) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @require_post
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, *args: object, **kwargs: object
        ) -> HttpResponse:
            # Reject anything that isn't a local request with the shared secret.
            if not authenticate_notify(request):
                raise AccessDeniedError()
            request_notes = get_request_notes(request)
            is_tornado_request = request_notes.tornado_handler is not None
            # These next 2 are not security checks; they are internal
            # assertions to help us find bugs.
            if is_tornado_view and not is_tornado_request:
                raise RuntimeError("Tornado notify view called with no Tornado handler")
            if not is_tornado_view and is_tornado_request:
                raise RuntimeError("Django notify view called with Tornado handler")
            request_notes.requestor_for_logs = "internal"
            return view_func(request, *args, **kwargs)

        return _wrapped_func_arguments

    return _wrapped_view_func
def to_utc_datetime(timestamp: str) -> datetime.datetime:
    # Convert a numeric UNIX-epoch string into a datetime via the shared
    # timestamp_to_datetime helper (UTC per the function name -- the
    # timezone handling lives in that helper).
    return timestamp_to_datetime(float(timestamp))
def statsd_increment(counter: str, val: int = 1) -> Callable[[FuncT], FuncT]:
    """Increments a statsd counter on completion of the
    decorated function.

    Pass the name of the counter to this decorator-returning function."""

    def decorator(func: FuncT) -> FuncT:
        @wraps(func)
        def incrementing_func(*args: object, **kwargs: object) -> object:
            # Run the wrapped function first; only bump the counter once it
            # has returned (i.e. completed without raising).
            result = func(*args, **kwargs)
            statsd.incr(counter, val)
            return result

        return cast(FuncT, incrementing_func)  # https://github.com/python/mypy/issues/1927

    return decorator
def rate_limit_user(request: HttpRequest, user: UserProfile, domain: str) -> None:
    """Rate-limit the request on behalf of *user* for the given domain.

    Raises RateLimited if the user is over the limit; otherwise returns
    None after annotating the request with rate limit information.
    """
    RateLimitedUser(user, domain=domain).rate_limit_request(request)
def rate_limit_ip(request: HttpRequest, ip_addr: str, domain: str) -> None:
    # Raises RateLimited when this IP is over its budget for the given
    # domain; otherwise annotates the request with rate limit headers.
    RateLimitedIPAddr(ip_addr, domain=domain).rate_limit_request(request)
def rate_limit_request_by_ip(request: HttpRequest, domain: str) -> None:
    """Rate-limit the request based on its originating IP address."""
    # REMOTE_ADDR is set by SetRemoteAddrFromRealIpHeader in conjunction
    # with the nginx configuration to guarantee this to be *the* correct
    # IP address to use - without worrying we'll grab the IP of a proxy.
    ip_addr = request.META["REMOTE_ADDR"]
    assert ip_addr
    rate_limit_ip(request, ip_addr, domain=domain)
def rate_limit_remote_server(
    request: HttpRequest, remote_server: "RemoteZulipServer", domain: str
) -> None:
    """Rate-limit a request from a remote Zulip server, logging violations.

    Raises RateLimited when the remote server exceeds its limits for the
    given domain; otherwise returns None.
    """
    try:
        RateLimitedRemoteZulipServer(remote_server, domain=domain).rate_limit_request(request)
    except RateLimited:
        rate_limiter_logger.warning(
            "Remote server %s exceeded rate limits on domain %s", remote_server, domain
        )
        # Idiom fix: re-raise the active exception with a bare `raise`
        # rather than `raise e`, preserving the original traceback without
        # rebinding the exception object.
        raise
def rate_limit() -> Callable[[ViewFuncT], ViewFuncT]:
    """Rate-limits a view. Returns a decorator"""

    def wrapper(func: ViewFuncT) -> ViewFuncT:
        @wraps(func)
        def wrapped_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
            # It is really tempting to not even wrap our original function
            # when settings.RATE_LIMITING is False, but it would make
            # for awkward unit testing in some situations.
            if not settings.RATE_LIMITING:
                return func(request, *args, **kwargs)

            if client_is_exempt_from_rate_limiting(request):
                return func(request, *args, **kwargs)

            user = request.user

            if isinstance(user, AnonymousUser):
                # Anonymous requests are limited purely by IP and return early.
                rate_limit_request_by_ip(request, domain="api_by_ip")
                return func(request, *args, **kwargs)
            elif settings.ZILENCER_ENABLED and isinstance(user, RemoteZulipServer):
                # Note: no early return here; this branch falls through to
                # the shared call below after applying remote-server limits.
                rate_limit_remote_server(request, user, domain="api_by_remote_server")
            else:
                assert isinstance(user, UserProfile)
                rate_limit_user(request, user, domain="api_by_user")

            return func(request, *args, **kwargs)

        return cast(ViewFuncT, wrapped_func)  # https://github.com/python/mypy/issues/1927

    return wrapper
def return_success_on_head_request(view_func: ViewFuncT) -> ViewFuncT:
    """Short-circuit HEAD requests with a generic success response."""

    @wraps(view_func)
    def _head_aware_view(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        # Non-HEAD requests pass straight through to the real view.
        if request.method != "HEAD":
            return view_func(request, *args, **kwargs)
        return json_success()

    return cast(ViewFuncT, _head_aware_view)  # https://github.com/python/mypy/issues/1927
def zulip_otp_required(
    redirect_field_name: str = "next",
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Callable[[ViewFuncT], ViewFuncT]:
    """
    The reason we need to create this function is that the stock
    otp_required decorator doesn't play well with tests. We cannot
    enable/disable if_configured parameter during tests since the decorator
    retains its value due to closure.

    Similar to :func:`~django.contrib.auth.decorators.login_required`, but
    requires the user to be :term:`verified`. By default, this redirects users
    to :setting:`OTP_LOGIN_URL`.
    """

    def test(user: UserProfile) -> bool:
        """
        Predicate for django_user_passes_test: returns True when the user
        satisfies the 2FA requirement. When 2FA is disabled server-wide,
        everyone passes; otherwise a user passes if they are verified,
        unauthenticated (spectator access), or have no confirmed OTP
        device configured.
        """
        if_configured = settings.TWO_FACTOR_AUTHENTICATION_ENABLED
        if not if_configured:
            return True

        # User has completed 2FA verification
        if user.is_verified():
            return True

        # This request is unauthenticated (logged-out) access; 2FA is
        # not required or possible.
        #
        # TODO: Add a test for 2FA-enabled with web-public views.
        if not user.is_authenticated:  # nocoverage
            return True

        # If the user doesn't have 2FA set up, we can't enforce 2FA.
        if not user_has_device(user):
            return True

        # User has configured 2FA and is not verified, so the user
        # fails the test (and we should redirect to the 2FA view).
        return False

    decorator = django_user_passes_test(
        test, login_url=login_url, redirect_field_name=redirect_field_name
    )
    return decorator
def add_google_analytics_context(context: Dict[str, object]) -> None:
    # Expose the configured Google Analytics ID via page_params so the
    # frontend can initialize analytics; no-op when not configured.
    if settings.GOOGLE_ANALYTICS_ID is not None:  # nocoverage
        page_params = context.setdefault("page_params", {})
        assert isinstance(page_params, dict)
        page_params["google_analytics_id"] = settings.GOOGLE_ANALYTICS_ID
def add_google_analytics(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator that injects the Google Analytics ID into the template context.

    Requires the wrapped view to return a (not-yet-rendered)
    TemplateResponse for any 200 response.
    """

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        response = view_func(request, *args, **kwargs)
        if isinstance(response, SimpleTemplateResponse):
            # The template hasn't been rendered yet, so mutating its
            # context_data here is safe.
            if response.context_data is None:
                response.context_data = {}
            add_google_analytics_context(response.context_data)
        elif response.status_code == 200:  # nocoverage
            raise TypeError("add_google_analytics requires a TemplateResponse")
        return response

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
|
|
import sys
import cPickle as pickle
import networkx as nx
import string
from collections import OrderedDict
import pdb
def traverse_depth_first(concept_nx_graph, parent=None):
    """Return the fragment's nodes as (instance, var_name) pairs in DFS order.

    Children are visited in the order given by their 'child_num' node
    attribute; only tree edges (successors whose recorded 'parent' is the
    current node) are followed.
    """
    node_list = []  # list of pairs (concept_instance, concept_var_name) e.g. ('establish-01', 'e')
    if parent == None:
        # Start from the topological root of the fragment.
        parent = nx.topological_sort(concept_nx_graph)[0]
    node_list.append((concept_nx_graph.node[parent]['instance'], parent))
    children = []
    for child in concept_nx_graph.successors(parent):
        # Follow only tree edges, not re-entrant edges to shared nodes.
        if concept_nx_graph.node[child]['parent'] == parent:
            children.append(child)
    if not children:
        return node_list
    ordered_children = [None]*len(children)
    order = []
    for child in children:
        order.append(concept_nx_graph.node[child]['child_num'])
    # child_num values need not start at 0; shift them down so they index
    # into ordered_children (assumes the values are consecutive).
    diff = max(order) + 1 - len(order)
    for child in children:
        ordered_children[concept_nx_graph.node[child]['child_num'] - diff] = child
    for child in ordered_children:
        node_list.extend(traverse_depth_first(concept_nx_graph, parent=child))
    return node_list
concept_graph_fragment_dict = {}
def create_span_concept_data(sentence, span_concept, pos_line, ner_line):
    """Produce one training row per token/aligned span of a sentence.

    Each row is [span_text, pos, concept_label, concept_var_names, ner,
    fragment_root, concept_idx]; unaligned tokens get the "NULL" concept.
    Also returns a dict mapping every graph node to the VW example index of
    the row it belongs to.
    """
    span_concept_data = []
    words = sentence.split()
    words_pos = pos_line.split()
    i = 0
    vw_idx = 1
    concept_vw_idx_dict = {}
    global concept_graph_fragment_dict
    while i < len(words):
        span_start = str(i)
        if span_concept.has_key(span_start):
            # Token starts an aligned span: emit a row for the whole span.
            span_end, span, concept_nx_graph = span_concept[span_start]
            span_from_pos = [word_pos.split("_")[0] for word_pos in words_pos[int(span_start):int(span_end)]]
            assert(span == span_from_pos)
            pos = [word_pos.split("_")[1] for word_pos in words_pos[int(span_start):int(span_end)]]
            node_list = traverse_depth_first(concept_nx_graph)
            concepts = []
            concept_short_names = []
            for (concept_instance, concept_var_name) in node_list:
                concepts.append(concept_instance)
                concept_short_names.append(concept_var_name)
            concept = "_".join(concepts)
            if concept not in all_concepts:
                all_concepts.append(concept)
            # Bug fix: the original tested `concept_nx_graph.nodes() > 1`,
            # comparing the node *list* itself to an int (always True in
            # Python 2); the intent is to store only multi-node fragments.
            if len(concept_nx_graph.nodes()) > 1:
                concept_graph_fragment_dict[concept] = concept_nx_graph
            concept_short_name = "_".join(concept_short_names)
            concept_nx_graph_root = nx.topological_sort(concept_nx_graph)[0]
            span_concept_data.append([" ".join(span).lower(), " ".join(pos), concept, concept_short_name, " ".join(ner_line.split()[int(span_start):int(span_end)]), concept_nx_graph_root, all_concepts.index(concept)])
            #concept_vw_idx_dict[concept_nx_graph_root] = vw_idx
            for n in concept_nx_graph.nodes():
                concept_vw_idx_dict[n] = vw_idx #assign all nodes in the fragment the same vw_idx so that all outgoing nodes from this fragment are assigned the same vw_idx parent
            i = int(span_end)
        else:
            # Unaligned token: emit a NULL-concept row for the single word.
            [word_from_pos, pos] = words_pos[i].rsplit("_", 1)
            assert(words[i] == word_from_pos)
            concept = "NULL"
            span_concept_data.append([words[i].lower(), pos, concept, "NULL", ner_line.split()[i], None, all_concepts.index(concept)])
            i += 1
        vw_idx += 1
    return span_concept_data, concept_vw_idx_dict
visited_nodes = []
def get_node_paths(parent_path, parent, amr_nx_graph):
    """Recursively map dotted child-number paths (e.g. "0.1.2") to nodes.

    Uses the module-level `visited_nodes` list to avoid revisiting nodes
    (cycle/reentrancy guard); see the NOTE(review) at its definition about
    it never being cleared.
    """
    #print parent_path
    if parent in visited_nodes:
        return {}
    visited_nodes.append(parent)
    if not amr_nx_graph.successors(parent):
        return {}
    node_paths = {}
    for child in amr_nx_graph.successors(parent):
        # Extend the parent's path with this child's ordinal.
        child_path = parent_path + '.' + str(amr_nx_graph.node[child]['child_num'])
        node_paths[child_path] = child
        #print node_list
        node_paths.update(get_node_paths(child_path, child, amr_nx_graph))
    return node_paths
# Dictionary: key=instance of node; value=dict with counts of spans aligned
# to this node in data. Filled by get_missing_alignment_data and consumed
# by add_missing_alignments.
forced_alignments = {}
# Spans consisting solely of these tokens are never force-aligned.
my_stopwords = list(string.punctuation) + ['the', 'a', 'to', 'of', 'are', 'is', 'was']
def get_missing_alignment_data(root, amr_nx_graph, alignments, sentence):
    """First pass: tally candidate span words for each unaligned concept.

    For every graph node without an alignment, counts (into the global
    `forced_alignments`) how often each unaligned single-word span
    co-occurs with it, skipping stopwords and 'multi-sentence' nodes.
    """
    sent_len = len(sentence.split())
    spans = []
    # Candidate spans are all single-word spans "i-(i+1)".
    for i in range(1, sent_len):
        spans.append(str(i-1) + "-" + str(i))
    node_paths = {"0": root}
    node_paths.update(get_node_paths("0", root, amr_nx_graph))
    aligned_node_paths = []
    aligned_spans = []
    for alignment in alignments.split():
        span, graph_fragments = alignment.split("|")
        aligned_spans.append(span)
        aligned_node_paths += graph_fragments.split("+")
    #print aligned_spans
    #print spans
    for node_path in node_paths.keys():
        if node_path not in aligned_node_paths:
            node_instance = amr_nx_graph.node[node_paths[node_path]]['instance']
            if node_instance == "multi-sentence":
                continue #since we handle these nodes differently
            if not forced_alignments.has_key(node_instance):
                forced_alignments[node_instance] = {}
            for span in spans:
                if span not in aligned_spans:
                    span_start, span_end = span.split('-')
                    span_words = " ".join(sentence.split()[int(span_start):int(span_end)])
                    if span_words in my_stopwords:
                        continue
                    if not forced_alignments[node_instance].has_key(span_words):
                        forced_alignments[node_instance][span_words] = 0
                    forced_alignments[node_instance][span_words] += 1
def add_missing_alignments(root, amr_nx_graph, alignments, sentence):
    """Second pass: attach each unaligned concept to its best span.

    Per unaligned node, picks the unaligned single-word span most often
    co-occurring with that node instance (per `forced_alignments`),
    falling back to the first unaligned span, and returns the alignment
    string with the new "span|path" entries appended.
    """
    sent_len = len(sentence.split())
    spans = []
    for i in range(1, sent_len):
        spans.append(str(i-1) + "-" + str(i))
    node_paths = {"0": root}
    node_paths.update(get_node_paths("0", root, amr_nx_graph))
    aligned_node_paths = []
    aligned_spans = []
    for alignment in alignments.split():
        span, graph_fragments = alignment.split("|")
        aligned_spans.append(span)
        aligned_node_paths += graph_fragments.split("+")
    new_alignments = []
    for node_path in node_paths.keys():
        if node_path not in aligned_node_paths:
            node_instance = amr_nx_graph.node[node_paths[node_path]]['instance']
            if node_instance == "multi-sentence":
                continue #since we handle these nodes differently
            max_count = 0
            most_aligned_span = ""
            most_aligned_span_words = ""
            unaligned_spans = []
            for span in spans:
                if span not in aligned_spans:
                    unaligned_spans.append(span)
                    span_start, span_end = span.split('-')
                    span_words = " ".join(sentence.split()[int(span_start):int(span_end)])
                    if span_words in my_stopwords:
                        continue
                    # Robustness fix: the original indexed
                    # forced_alignments[node_instance][span_words] directly,
                    # which raises KeyError for any (node, span) pair absent
                    # from the first-pass statistics (e.g. when run on data
                    # not seen by get_missing_alignment_data). Default to 0.
                    count = forced_alignments.get(node_instance, {}).get(span_words, 0)
                    if count > max_count:
                        max_count = count
                        most_aligned_span = span
                        most_aligned_span_words = span_words
            if not unaligned_spans:
                continue
            if max_count == 0:
                # No statistics for this node; fall back to the first
                # unaligned span of the sentence.
                span = unaligned_spans[0]
                most_aligned_span = span
                span_start, span_end = span.split('-')
                most_aligned_span_words = " ".join(sentence.split()[int(span_start):int(span_end)])
            new_alignments.append(most_aligned_span + "|" + node_path)
            #print "NEW ALIGNMENT: ", node_instance, most_aligned_span_words
    return alignments + " " + " ".join(new_alignments)
def get_span_concept(alignment, root, amr_nx_graph, sentence):
    """Turn one "start-end|fragment+fragment" alignment into a subgraph.

    Returns (span_start, [span_end, span_tokens, concept_nx_graph]) where
    concept_nx_graph contains the aligned nodes plus any edges between them
    copied from the full AMR graph.
    """
    span_num, graph_fragments = alignment.split("|")
    span_start, span_end = span_num.split("-")
    span = sentence.split()[int(span_start):int(span_end)]
    #Create a concept networkx graph and add all nodes in graph_fragments
    concept_nx_graph = nx.DiGraph()
    for graph_fragment in graph_fragments.split("+"):
        # Walk from the root following the dotted child_num path of this
        # fragment; the node reached at the end is added to the subgraph.
        parent, attr_dict = root, amr_nx_graph.node[root]
        for child_num in graph_fragment.split(".")[1:]:
            children = amr_nx_graph.successors(parent)
            for child in children:
                if amr_nx_graph.node[child]['parent'] == parent and amr_nx_graph.node[child]['child_num'] == int(child_num):
                    parent, attr_dict = child, amr_nx_graph.node[child]
        concept_nx_graph.add_node(parent, attr_dict)
    #Get all edges between the nodes in graph_fragment and add those to concept_nx_graph
    nodes = concept_nx_graph.nodes()
    for i in range(len(nodes)):
        for j in range(i + 1, len(nodes)):
            # Copy edges in both directions, with their attribute dicts.
            if amr_nx_graph.has_edge(nodes[i], nodes[j]):
                concept_nx_graph.add_edge(nodes[i], nodes[j], amr_nx_graph.get_edge_data(nodes[i], nodes[j]))
            if amr_nx_graph.has_edge(nodes[j], nodes[i]):
                concept_nx_graph.add_edge(nodes[j], nodes[i], amr_nx_graph.get_edge_data(nodes[j], nodes[i]))
    return (span_start, [span_end, span, concept_nx_graph])
def write_span_concept_dict(span_concept_dict, output_dict_file):
    """Write "span concept_idx:count ..." lines, most frequent concept first.

    Appends a final UNK entry pointing at a fresh index for spans unseen
    at test time.
    """
    #Sort the concepts for each span by their frequency
    for span, concepts in span_concept_dict.iteritems():
        span_concept_dict[span] = OrderedDict(sorted(concepts.items(), key=lambda concepts: concepts[1], reverse=True))
    for span, concepts in span_concept_dict.iteritems():
        # Join multi-word spans with underscores (and drop apostrophes) so
        # each span is a single token in the output file.
        span_tag = span.replace(" ", "_").replace("\'", "")
        line = span_tag + " "
        for (concept_idx, count) in concepts.iteritems():
            line += str(concept_idx) + ":" + str(count) + " "
        output_dict_file.write(line+"\n")
    output_dict_file.write("UNK " + str(len(span_concept_dict)) + ":1\n")
# Global label inventories; index 0 of all_concepts is a placeholder so the
# NULL concept gets index 1.
all_concepts = [None, "NULL"] #since null concept should be 1
all_relations = ["NOEDGE", "ROOT_EDGE"]
def create_dataset(amr_nx_graphs, amr_aggregated_metadata):
    """Build the span->concept training data for all sentences.

    Runs a statistics pass (get_missing_alignment_data) over the whole
    corpus, then per sentence forces missing alignments and produces:
    span_concept_dataset (id -> rows), span_concept_dict
    (span -> {concept_idx: count}), and concept_vw_idx_dict_dataset
    (id -> node -> vw index).
    """
    # Pass 1: gather co-occurrence counts for unaligned nodes/spans.
    for value in amr_nx_graphs:
        [root, amr_nx_graph, sentence, alignments, id] = value
        get_missing_alignment_data(root, amr_nx_graph, alignments, sentence)
    span_concept_dataset = {}
    span_concept_dict = {}
    concept_vw_idx_dict_dataset = {}
    # Pass 2: force missing alignments and emit the per-sentence rows.
    for value in amr_nx_graphs:
        span_concept = {}
        [root, amr_nx_graph, sentence, alignments, id] = value
        alignments = add_missing_alignments(root, amr_nx_graph, alignments, sentence)
        for alignment in alignments.split():
            span, concept = get_span_concept(alignment, root, amr_nx_graph, sentence)
            span_concept[span] = concept
        span_concept_data, concept_vw_idx_dict = create_span_concept_data(sentence, span_concept, amr_aggregated_metadata[id][1], amr_aggregated_metadata[id][2])
        span_concept_dataset[id] = span_concept_data
        concept_vw_idx_dict_dataset[id] = concept_vw_idx_dict
        # Accumulate per-span concept frequencies for the dictionary file.
        for [span, pos, concept, name, ner, nx_root, concept_idx] in span_concept_data:
            span = span.replace(" ", "_")
            if span_concept_dict.has_key(span):
                if span_concept_dict[span].has_key(concept_idx):
                    span_concept_dict[span][concept_idx] += 1
                else:
                    span_concept_dict[span][concept_idx] = 1
            else:
                span_concept_dict[span] = {concept_idx:1}
    return span_concept_dataset, span_concept_dict, concept_vw_idx_dict_dataset
def print_vw_format(amr_nx_graphs, span_concept_dataset, concept_vw_idx_dict_dataset, output_vw_file):
    """Write the dataset in VW structured-prediction format, one sentence per block.

    Each example line carries (parent_vw_idx, relation_idx, concept_idx)
    triples followed by word/POS feature namespaces; sentences are
    separated by a blank line.
    """
    for value in amr_nx_graphs:
        [root, amr_nx_graph, sentence, alignments, id] = value
        span_concept_data = span_concept_dataset[id]
        concept_vw_idx_dict = concept_vw_idx_dict_dataset[id]
        for data in span_concept_data:
            span = data[0]
            pos = data[1]
            concept = data[2].lower()
            #short_name = data[3]
            ner = data[4]
            node = data[5]
            concept_idx = data[6]
            # ':' and '|' are VW syntax characters; replace them in features.
            span = span.replace(":", ".").replace("|", ".")
            pos = pos.replace(":", ".").replace("|", ".")
            span_tag = span.replace(" ", "_").replace("\'", "")
            if not node: #this is a null concept
                vw_string = "0 0 1 {}|w {} |p {}".format(span_tag, span, pos)
            else:
                parents = amr_nx_graph.predecessors(node)
                tags = []
                parents = [parent for parent in parents if concept_vw_idx_dict.has_key(parent)] #it has an unaligned parent concept, so remove that parent
                for parent in parents:
                    relation = amr_nx_graph[parent][node][0]['relation'].lower()
                    if relation not in all_relations:
                        all_relations.append(relation)
                    tags.append(relation)
                if not parents: #this is the root
                    vw_string = "0 1 {} ".format(concept_idx)
                else:
                    # First parent, then any additional (parent, relation)
                    # pairs for reentrant nodes.
                    vw_string = "{} {} {} ".format(concept_vw_idx_dict[parents[0]], all_relations.index(tags[0]), concept_idx)
                    for i in range(1, len(parents)):
                        vw_string += "{} {} ".format(concept_vw_idx_dict[parents[i]], all_relations.index(tags[i]))
                #vw_string += "|w " + span + "|p " + pos + "|n " + ner
                vw_string += "{}|w {} |p {}".format(span_tag, span, pos)
            output_vw_file.write(vw_string + "\n")
        # Blank line terminates the sentence block.
        output_vw_file.write("\n")
def main(argv):
if len(argv) < 2:
print "usage: python amr_nx_to_vw.py <amr_nx_graphs.p> <amr_aggregated_metadata.p> <output_file.vw> <output_concepts.p> <output_relations.p> <span_concept_dict> <concept_graph_fragment_dict.p>>"
return
amr_nx_graphs_p = argv[0]
amr_aggregated_metadata_p = argv[1]
output_vw_file = open(argv[2], 'w')
output_dict_file = open(argv[5], 'w')
#Format of amr_nx_graphs
#amr_nx_graphs = {id : [root, amr_nx_graph, sentence, alignment]}
amr_nx_graphs = pickle.load(open(amr_nx_graphs_p, "rb"))
#Format of amr_aggregated_metadata
#amr_aggregated_metadata = {id : [sentence, pos, ner]}
amr_aggregated_metadata = pickle.load(open(amr_aggregated_metadata_p, "rb"))
span_concept_dataset, span_concept_dict, concept_vw_idx_dict_dataset = create_dataset(amr_nx_graphs, amr_aggregated_metadata)
write_span_concept_dict(span_concept_dict, output_dict_file)
print_vw_format(amr_nx_graphs, span_concept_dataset, concept_vw_idx_dict_dataset, output_vw_file)
all_concepts.append("UNK") #for unknown concepts during test time
all_relations.append("UNK") #for unknown relations during test time
pickle.dump(all_concepts, open(argv[3], 'wb'))
pickle.dump(all_relations, open(argv[4], 'wb'))
global concept_graph_fragment_dict
pickle.dump(concept_graph_fragment_dict, open(argv[6], 'wb'))
# Script entry point; expects the 7 file arguments described in main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
|
import os
import tempfile
import base64
import StringIO
import random
import unittest
from migration_manager import api
from migration_manager import serializer
from migration_manager import repository
from migration_manager import transaction
# Directory containing this test module and its fixture repositories.
FOLDER = os.path.dirname(__file__)
REPO_DIR = os.path.join(FOLDER, "_test")
# Fixture migration with both forward and backward SQL scripts.
TEST_MIGRATION = api.Migration(
    "Test migration",
    """CREATE TABLE auth_user_groups
(
id serial NOT NULL,
user_id integer NOT NULL,
group_id integer NOT NULL,
CONSTRAINT auth_user_groups_pkey PRIMARY KEY (id ),
CONSTRAINT auth_user_groups_group_id_fkey FOREIGN KEY (group_id)
REFERENCES auth_group (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT user_id_refs_id_831107f1 FOREIGN KEY (user_id)
REFERENCES auth_user (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_user_groups_user_id_group_id_key UNIQUE (user_id , group_id )
);""",
    "DROP TABLE auth_user_groups;"
)
# Same migration without a backward script, for serializers that don't
# support backward migrations.
TEST_MIGRATION_NO_BACK = api.Migration(TEST_MIGRATION.migration_name,
    TEST_MIGRATION.forward_script, None)
class SerializerTest(object):
    """Mixin of shared round-trip tests for Migration serializers.

    Concrete subclasses (which also derive unittest.TestCase) set
    `serializer_instance`, and optionally a fixed `migration` /
    `serialized_migration` pair for exact-output checks.
    """

    serializer_instance = None
    migration = None
    serialized_migration = None

    def _random_migtation(self):
        # Random printable payloads; a backward script only when the
        # serializer supports backward migrations.
        return api.Migration(
            base64.encodestring(os.urandom(45)),
            base64.encodestring(os.urandom(45)),
            base64.encodestring(os.urandom(45)) if self.serializer_instance.supports_backward_migration else None
        )

    def test_random_data(self):
        # Save/load round-trips must preserve the migration contents.
        for ii in xrange(10):
            migration = self._random_migtation()
            file = tempfile.NamedTemporaryFile(suffix=self.serializer_instance.extension, delete=False)
            self.serializer_instance.save(file, migration)
            migration2 = self.serializer_instance.load(open(file.name), migration.migration_name)
            self.assertEqual(migration._contents(), migration2._contents())

    def test_serialize_backwars(self):
        # Applies only to serializers WITHOUT backward support: saving a
        # migration that carries a backward script must raise.
        if self.serializer_instance.supports_backward_migration:
            return
        try:
            migration = api.Migration("test", "a", "b")
            self.serializer_instance.save(tempfile.TemporaryFile(), migration)
            self.fail("Save should have raised an exception when saving migration instance that has"
                "backward_script in serializer {} that doesn't support backward migrations"
                .format(self.serializer_instance))
        except api.MigrationError:
            pass

    def test_load_exception(self):
        # load() without a migration name must raise ValueError.
        migration = self._random_migtation()
        file = tempfile.TemporaryFile()
        self.serializer_instance.save(file, migration)
        try:
            self.serializer_instance.load(file)
            self.fail("Load should raise an exception when not given migration name")
        except ValueError:
            pass

    def test_serialization(self):
        # Exact-output check; skipped when the subclass defines no fixture.
        if not self.migration or not self.serialized_migration:
            return
        file = StringIO.StringIO()
        self.serializer_instance.save(file, self.migration)
        self.assertEqual(self.serialized_migration, file.getvalue())

    def test_deserialization(self):
        # Exact-input check; skipped when the subclass defines no fixture.
        if not self.migration or not self.serialized_migration:
            return
        file = StringIO.StringIO(self.serialized_migration)
        mig = self.serializer_instance.load(file, self.migration.migration_name)
        self.assertEqual(self.migration._contents(), mig._contents())
class XMLSerializerTest(unittest.TestCase, SerializerTest):
    # Exercises the XML serializer with a fixture whose expected output
    # wraps the scripts in CDATA sections.
    serializer_instance = serializer.XmlSerializer()
    migration = TEST_MIGRATION
    serialized_migration = """<migrations name="Test migration">
<forward><![CDATA[CREATE TABLE auth_user_groups
(
id serial NOT NULL,
user_id integer NOT NULL,
group_id integer NOT NULL,
CONSTRAINT auth_user_groups_pkey PRIMARY KEY (id ),
CONSTRAINT auth_user_groups_group_id_fkey FOREIGN KEY (group_id)
REFERENCES auth_group (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT user_id_refs_id_831107f1 FOREIGN KEY (user_id)
REFERENCES auth_user (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_user_groups_user_id_group_id_key UNIQUE (user_id , group_id )
);]]></forward>
<reverse><![CDATA[DROP TABLE auth_user_groups;]]></reverse>
</migrations>
"""
class PlaintextSerializerTest(unittest.TestCase, SerializerTest):
    # Plaintext serialization is just the forward script verbatim; the
    # fixture therefore has no backward script.
    serializer_instance = serializer.PlaintextSerializer()
    migration = TEST_MIGRATION_NO_BACK
    serialized_migration = """CREATE TABLE auth_user_groups
(
id serial NOT NULL,
user_id integer NOT NULL,
group_id integer NOT NULL,
CONSTRAINT auth_user_groups_pkey PRIMARY KEY (id ),
CONSTRAINT auth_user_groups_group_id_fkey FOREIGN KEY (group_id)
REFERENCES auth_group (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT user_id_refs_id_831107f1 FOREIGN KEY (user_id)
REFERENCES auth_user (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_user_groups_user_id_group_id_key UNIQUE (user_id , group_id )
);"""
class PickleSerializerTest(unittest.TestCase, SerializerTest):
    # No fixed fixture is defined, so only the randomized round-trip and
    # error-path tests from SerializerTest run here.
    serializer_instance = serializer.PickleSerializer()
class RepositoryTestBad(unittest.TestCase):
    """A repository fixture with duplicate migration ids must be rejected."""

    def setUp(self):
        bad_repo_path = os.path.join(REPO_DIR, "_repository_multiple_ids")
        self.repository = repository.FileSystemRepository(bad_repo_path)
        self.repository.file_extension = "sql"

    def test_migration_filenames(self):
        # Accessing the property itself should raise on the broken fixture.
        self.assertRaises(api.RepositoryError, getattr, self.repository, "migration_filenames")
class RepositoryTestOK(unittest.TestCase):
    """FileSystemRepository behavior over a well-formed fixture repository."""

    def setUp(self):
        good_repo_path = os.path.join(REPO_DIR, "_repository_ok")
        self.repository = repository.FileSystemRepository(good_repo_path)
        self.repository.file_extension = "sql"

    def test_migration_filenames(self):
        expected_filenames = {
            0: "00000-initial.sql",
            100: "00100-foo.sql",
            200: "00200-bar.sql",
        }
        self.assertEqual(self.repository.migration_filenames, expected_filenames)

    def test_migration_ids(self):
        self.assertEqual([0, 100, 200], self.repository.migration_ids)

    def test_migration_names(self):
        expected_names = {0: 'initial', 100: 'foo', 200: 'bar'}
        self.assertEqual(self.repository.migration_names, expected_names)

    def test_max_migration(self):
        self.assertEqual(200, self.repository.max_version)
class MockVersionCheck(object):
    """Stand-in schema-version checker used by the Manager tests.

    Starts at version 0 and emits a recognizable marker string instead of
    real schema-update SQL.
    """

    def __init__(self):
        super(MockVersionCheck, self).__init__()
        self.version = 0

    def schema_update_script(self, version):
        # Marker format: SeT_ScHeMa(<zero-padded 5-digit version>)
        return "SeT_ScHeMa(%05d)" % (version,)
class ManagerNoBack(unittest.TestCase):
    """A Manager over a plaintext repository cannot migrate backward."""

    def setUp(self):
        self.manager = api.Manager(
            repository.FileSystemRepository(os.path.join(REPO_DIR, "_repository_ok")),
            serializer.PlaintextSerializer(),
            MockVersionCheck(),
            transaction.TestTransaction()
        )

    def test_migrate(self):
        # Plaintext migrations carry no backward scripts, so migrating
        # down from 200 to 0 must fail.
        self.manager.version_check.version = 200
        self.assertRaises(api.MigrationError, self.manager.migrate_to, 0)
        # self.manager.migrate_to(0)
class ManagerTests(unittest.TestCase):
    """End-to-end Manager tests over the _manager_test_repository fixture."""

    def setUp(self):
        self.manager = api.Manager(
            repository.FileSystemRepository(os.path.join(REPO_DIR, "_manager_test_repository")),
            serializer.XmlSerializer(),
            MockVersionCheck(),
            transaction.TestTransaction()
        )

    def test_migate_id(self):
        self.assertEqual((False, [100, 200]), self.manager.get_migration_ids())

    def test_migate_id1a(self):
        self.assertEqual((False, [100, 200]), self.manager.get_migration_ids(200))

    def test_migate_id2(self):
        self.assertEqual((False, [100]), self.manager.get_migration_ids(100))

    def test_migate_id3(self):
        self.manager.version_check.version = 100
        self.assertEqual((False, [200]), self.manager.get_migration_ids())

    def test_migate_id4(self):
        self.manager.version_check.version = 200
        self.assertEqual((False, []), self.manager.get_migration_ids())

    def test_migate_id_back(self):
        self.manager.version_check.version = 200
        self.assertEqual((True, [200, 100]), self.manager.get_migration_ids(0))

    def test_migate_id_back_2(self):
        self.manager.version_check.version = 200
        self.assertEqual((True, [200]), self.manager.get_migration_ids(100))

    def test_migate_id_back_3(self):
        # Bug fix: this method previously reused the name
        # test_migate_id_back_2, silently shadowing the case above so it
        # never ran; renamed so both tests execute.
        self.manager.version_check.version = 100
        self.assertEqual((True, [100]), self.manager.get_migration_ids(0))

    def test_migration_forward(self):
        self.manager.migrate_to(200)
        self.assertEqual(self.FORWARD_MIGRATION_CONTENTS, self.manager.transaction.contents)

    # Expected transaction contents after migrating 0 -> 200: both forward
    # scripts, each followed by the mock schema-version marker.
    FORWARD_MIGRATION_CONTENTS = """CREATE TABLE auth_permission
(
id serial NOT NULL,
name character varying(50) NOT NULL,
content_type_id integer NOT NULL,
codename character varying(100) NOT NULL,
CONSTRAINT auth_permission_pkey PRIMARY KEY (id ),
CONSTRAINT content_type_id_refs_id_728de91f FOREIGN KEY (content_type_id)
REFERENCES django_content_type (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_permission_content_type_id_codename_key UNIQUE (content_type_id , codename )
):
SeT_ScHeMa(00100)
CREATE TABLE django_comment_flags
(
id serial NOT NULL,
user_id integer NOT NULL,
comment_id integer NOT NULL,
flag character varying(30) NOT NULL,
flag_date timestamp with time zone NOT NULL,
CONSTRAINT django_comment_flags_pkey PRIMARY KEY (id ),
CONSTRAINT django_comment_flags_comment_id_fkey FOREIGN KEY (comment_id)
REFERENCES django_comments (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT django_comment_flags_user_id_fkey FOREIGN KEY (user_id)
REFERENCES auth_user (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT django_comment_flags_user_id_comment_id_flag_key UNIQUE (user_id , comment_id , flag )
)
SeT_ScHeMa(00200)
"""
|
|
# -*- coding: utf-8 -*-
"""
activity.py
~~~~~~~~~~~~
This module implements the Activity HPE OneView REST API
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import filter
from future import standard_library
standard_library.install_aliases()
# Module metadata used by packaging/introspection tools.
__title__ = 'activity'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2015) Hewlett Packard Enterprise ' \
                ' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys # For verbose
import time # For sleep
from hpOneView.common import uri, get_members
from hpOneView.exceptions import HPOneViewInvalidResource, HPOneViewException, HPOneViewUnknownType, \
HPOneViewTaskError, HPOneViewTimeout
# Task lifecycle state buckets used when polling the OneView task tracker.
# Note 'Warning' appears in both the error and completed lists: wait4task()
# explicitly tolerates a final state of 'Warning' as a non-fatal completion.
TaskErrorStates = ['Error', 'Warning', 'Terminated', 'Killed']
TaskCompletedStates = ['Error', 'Warning', 'Completed', 'Terminated', 'Killed']
TaskPendingStates = ['New', 'Starting', 'Pending', 'Running', 'Suspended', 'Stopping']
class activity(object):
    """Wrapper around the HPE OneView activity REST APIs.

    Exposes helpers for tasks, alerts, audit logs and events.  All HTTP
    traffic goes through the connection object supplied at construction.
    """

    def __init__(self, con):
        # con: an authenticated hpOneView connection object.
        self._con = con

    ###########################################################################
    # Tasks
    ###########################################################################

    def get_task_associated_resource(self, task):
        """Return the resource-association dict for *task*.

        Returns {} when *task* is falsy.  Raises HPOneViewInvalidResource
        when the task's ``type`` is not a recognized task version.
        """
        if not task:
            return {}
        if task['type'] == 'TaskResource':
            # V1 tasks only carry a URI; fetch the resource for its name.
            obj = self._con.get(task['associatedResourceUri'])
            tmp = {
                'resourceName': obj['name'],
                'associationType': None,
                'resourceCategory': None,
                'resourceUri': obj['uri']}
        elif task['type'] == 'TaskResourceV2':
            # V2 tasks embed the association dict directly.
            tmp = task['associatedResource']
        else:
            raise HPOneViewInvalidResource('Task resource is not a recognized'
                                           ' version')
        return tmp

    def make_task_entity_tuple(self, obj):
        """Return a ``(task, entity)`` tuple for a task or backup dict.

        For task/backup resources the associated entity is fetched from
        the appliance; for any other category *obj* itself is the entity
        and the task is left empty.  Raises HPOneViewUnknownType when
        *obj* is falsy, HPOneViewInvalidResource on unrecognized types.
        """
        task = {}
        entity = {}
        if obj:
            if obj['category'] == 'tasks' or obj['category'] == 'backups':
                # it is an error if type is not in obj, so let the except flow
                uri = ''
                if obj['type'] == 'TaskResource':
                    task = obj
                    uri = obj['associatedResourceUri']
                elif obj['type'] == 'TaskResourceV2':
                    task = obj
                    uri = obj['associatedResource']['resourceUri']
                elif obj['type'] == 'BACKUP':
                    task = self._con.get(obj['taskUri'])
                    uri = obj['uri']
                else:
                    raise HPOneViewInvalidResource('Task resource is not a'
                                                   ' recognized version')
                if uri:
                    # The original wrapped this call in a try/except that
                    # only re-raised HPOneViewException; the exception
                    # propagates either way, so the wrapper was removed.
                    entity = self._con.get(uri)
            else:
                entity = obj
        else:
            raise HPOneViewUnknownType('Unknown object type')
        return task, entity

    def is_task_running(self, task):
        """Return True while *task* is in one of ``TaskPendingStates``.

        Refreshes the task from the appliance when a ``uri`` is present.
        """
        # NOTE: the needless ``global TaskPendingStates`` declaration was
        # removed — reading a module-level name requires no declaration.
        if 'uri' in task:
            task = self._con.get(task['uri'])
        if 'taskState' in task and task['taskState'] in TaskPendingStates:
            return True
        return False

    def wait4task(self, task, tout=60, verbose=False):
        """Block until *task* completes; return the final task dict.

        Polls once per second for up to *tout* seconds.  Raises
        HPOneViewTimeout on timeout and HPOneViewTaskError when the task
        finishes in a fatal error state ('Warning' is tolerated).
        Returns None when *task* is None.
        """
        count = 0
        if task is None:
            return None
        while self.is_task_running(task):
            if verbose:
                sys.stdout.write('Task still running after %d seconds \r'
                                 % count)
                sys.stdout.flush()
            time.sleep(1)
            count += 1
            if count > tout:
                raise HPOneViewTimeout('Waited ' + str(tout) +
                                       ' seconds for task to complete, aborting')
        task = self._con.get(task['uri'])
        if task['taskState'] in TaskErrorStates and task['taskState'] != 'Warning':
            # Prefer the first task error message, then the status, then a
            # generic fallback.
            err = task['taskErrors'][0]
            msg = err['message']
            if msg is not None:
                raise HPOneViewTaskError(msg)
            elif task['taskStatus'] is not None:
                raise HPOneViewTaskError(task['taskStatus'])
            else:
                raise HPOneViewTaskError('Unknown Exception')
        return task

    def wait4tasks(self, tasks, tout=60, verbose=False):
        """Block until every task in *tasks* completes or *tout* elapses."""
        running = list(filter(self.is_task_running, tasks[:]))
        count = 0
        while running:
            if verbose:
                print(('Tasks still running after %s seconds', count))
                print(running)
            time.sleep(1)
            count += 1
            running = list(filter(self.is_task_running, running))
            if count > tout:
                # BUG FIX: the timeout message previously hard-coded
                # "60 seconds" regardless of the ``tout`` argument.
                raise HPOneViewTimeout('Waited ' + str(tout) +
                                       ' seconds for tasks to complete'
                                       ', aborting')

    def get_tasks(self):
        """Return all task resources known to the appliance."""
        return get_members(self._con.get(uri['task']))

    ###########################################################################
    # Alerts
    ###########################################################################

    def get_alerts(self, AlertState='All'):
        """Return alerts, optionally filtered by ``alertState``."""
        if AlertState == 'All':
            # TODO remove the evil use/hack of the large count default. The OneView
            # API documents that count=-1 should return everything but it is not
            # universally honored, where the extremely large count number is.
            return get_members(self._con.get(uri['alerts'] +
                                             '?start=0&count=9999999'))
        else:
            return (self._con.get_entities_byfield(uri['alerts'],
                                                   'alertState',
                                                   AlertState, count=9999999))

    def delete_alert(self, alert):
        """Delete a single alert resource."""
        self._con.delete(alert['uri'])

    def delete_alerts(self):
        """Delete every alert on the appliance."""
        self._con.delete(uri['alerts'])

    def update_alert(self, alert, alertMap):
        """PUT *alertMap* onto *alert*; return the updated alert dict."""
        task, moddedAlert = self._con.put(alert['uri'], alertMap)
        return moddedAlert

    ###########################################################################
    # Audit Logs
    ###########################################################################

    def get_audit_logs(self, query=''):
        """Return audit log records matching the raw query string."""
        body = self._con.get(uri['audit-logs'] + '?' + query)
        return get_members(body)

    def create_audit_log(self, auditLogRecord):
        """POST a new audit log record."""
        self._con.post(uri['audit-logs'], auditLogRecord)
        return

    def download_audit_logs(self, filename):
        """Download the appliance audit logs and write them to *filename*.

        BUG FIX: uses a ``with`` block so the file handle is closed even
        when the write raises (the original leaked the handle on error).
        """
        body = self._con.get(uri['audit-logs-download'])
        with open(filename, 'wb') as f:
            f.write(body)
        return

    ###########################################################################
    # Events
    ###########################################################################

    def get_events(self, query=''):
        """Return event records matching the raw query string."""
        body = self._con.get(uri['events'] + '?' + query)
        return get_members(body)

    def create_event(self, eventRecord):
        """POST a new event record."""
        self._con.post(uri['events'], eventRecord)
        return
|
|
# -*- coding: utf-8 -*-
"""Test walls."""
import flickrapi
import py
import pytest
import requests
import walls
try:
# Python 3.x
from configparser import ConfigParser
except ImportError:
# Python 2.x
from ConfigParser import SafeConfigParser as ConfigParser
@pytest.fixture
def config(tmpdir):
    """Write a complete, valid walls config file and return its path."""
    f = tmpdir.join('config.ini')
    f.write('''
[walls]
api_key = myapikey
api_secret = myapisecret
tags = sanfrancisco
image_dir = {0}
width = 1920
height = 1080
'''.format(tmpdir))
    return str(f)


@pytest.fixture
def config_obj(config):
    """Parse the config fixture into a ConfigParser instance."""
    cfg = ConfigParser()
    cfg.read(config)
    return cfg


@pytest.fixture
def flickr():
    """Return a FlickrAPI client built with dummy credentials."""
    return flickrapi.FlickrAPI('myapikey', 'myapisecret')


class FakeResponse(requests.Response):
    """Used to emulate some requests behavior."""

    def __call__(self, *a, **kw):
        """Lets us use an instance to replace get()."""
        return self

    def iter_content(self, *a, **kw):
        # Yield the payload one encoded character at a time, mimicking
        # requests' streaming download interface.
        for c in 'this is the data':
            yield c.encode()


class SystemExitContext(object):
    """Run pytest.raises, and check the error message."""

    def __init__(self, msg, capsys):
        # msg: the exact stderr text expected; capsys: pytest capture fixture.
        self.raises = pytest.raises(SystemExit)
        self.capsys = capsys
        self.msg = msg

    def __enter__(self):
        return self.raises.__enter__()

    def __exit__(self, *args):
        # Assert on stderr before delegating, so a wrong message fails the
        # test even when SystemExit itself was raised as expected.
        assert self.capsys.readouterr()[1] == self.msg
        return self.raises.__exit__(*args)


@pytest.fixture
def errmsg(capsys):
    """Make sure we exit with the given error message."""
    def fixture(msg):
        return SystemExitContext(msg, capsys)
    return fixture
def test_stderr_and_exit(errmsg):
    """Make sure that stderr_and_exit (and therefore errmsg) works."""
    with errmsg('Some error message'):
        walls.stderr_and_exit('Some error message')


def test_usage(errmsg):
    """Make sure we print out the usage if the arguments are invalid."""
    with errmsg('Usage: walls [-c] [config_file]\n'):
        walls.main(['walls', 'config_file', 'blah'])


def test_default_config(config, monkeypatch):
    """Override expanduser to point to our temporary config file."""
    def my_expanduser(path):
        # Redirect only the default config path; everything else unchanged.
        if path == '~/.wallsrc':
            return config
        return path
    monkeypatch.setattr('os.path.expanduser', my_expanduser)
    # Stub out run() so main() stops right after loading the config.
    monkeypatch.setattr('walls.run', lambda *a: None)
    walls.main(['walls'])


def test_supplied_config(config):
    """Test a config file passed as a command line argument."""
    cfg = walls.load_config(config)
    assert cfg.get('walls', 'api_key') == 'myapikey'


def test_invalid_config(errmsg):
    """Make sure an error is raised if the config file can't be read."""
    with errmsg("Couldn't load config fake.ini\n"):
        walls.load_config('fake.ini')


def test_config_no_walls(tmpdir, errmsg):
    """Check for missing [walls] section."""
    f = tmpdir.join('config.ini')
    f.write('\n')
    with errmsg('Config missing [walls] section.\n'):
        walls.load_config(str(f))


def test_config_missing(tmpdir, errmsg):
    """Check behavior on missing config values."""
    # api_key and tags are deliberately omitted from the file below.
    f = tmpdir.join('config.ini')
    f.write('''
[walls]
api_secret = myapisecret
image_dir = {0}
width = 1920
height = 1080
'''.format(tmpdir))
    with errmsg("Missing config keys: 'api_key', 'tags'\n"):
        walls.load_config(str(f))


def test_config_types(tmpdir, errmsg):
    """Check behavior on non-integer width/height config values."""
    f = tmpdir.join('config.ini')
    f.write('''
[walls]
api_key = myapikey
api_secret = myapisecret
tags = sanfrancisco
image_dir = {0}
width = abc
height = def
'''.format(tmpdir))
    with errmsg("The following must be integers: 'width', 'height'\n"):
        walls.load_config(str(f))
def test_config_dest(tmpdir, errmsg):
    """Nonexistent destination directory."""
    cfg = '''
[walls]
api_key = myapikey
api_secret = myapisecret
tags = sanfrancisco
image_dir = {0}
width = 1920
height = 1080
'''
    f = tmpdir.join('config1.ini')
    f.write(cfg.format('/does/not/exist'))
    with errmsg('The directory /does/not/exist does not exist.\n'):
        walls.load_config(str(f))
    # Second case: image_dir points at the config file itself — a file,
    # not a directory — which must also be rejected.
    f = tmpdir.join('config2.ini')
    f.write(cfg.format(f))
    with errmsg('The directory {0} does not exist.\n'.format(f)):
        walls.load_config(str(f))


def test_clear_dir(tmpdir):
    """clear_dir removes files but leaves subdirectories in place."""
    tmpdir.join('a.txt').write('test1')
    tmpdir.join('b.txt').write('test2')
    tmpdir.mkdir('dir').join('nested.txt').write('test3')
    assert len(tmpdir.listdir()) == 3
    walls.clear_dir(str(tmpdir))
    # Only the subdirectory survives.
    assert len(tmpdir.listdir()) == 1


def test_smallest_url(flickr):
    """smallest_url picks the smallest size meeting the requested minimum."""
    data = {
        'sizes': {'size': [
            {
                'width': '1280',
                'height': '720',
                'source': 'url1',
            },
            {
                'width': '1920',
                'height': '1080',
                'source': 'url2',
            },
            {
                'width': '2560',
                'height': '1440',
                'source': 'url3',
            },
        ]},
    }
    # Stub the API call with a canned payload.
    flickr.photos_getSizes = lambda **kw: data
    assert walls.smallest_url(flickr, 'fake', 1920, 1080) == 'url2'
def test_first_photo_invalid(monkeypatch, config_obj, errmsg):
    """Malformed getSizes payloads must produce a clean error exit."""
    data = None
    # The lambda closes over ``data``, so each loop iteration below swaps
    # in a new malformed payload without re-patching.
    monkeypatch.setattr('flickrapi.FlickrAPI.photos_getSizes',
                        lambda self, **kw: data, raising=False)
    monkeypatch.setattr('flickrapi.FlickrAPI.walk',
                        lambda self, **kw: [{'id': '1'}])
    for d in [[], {}, {'sizes': 1}, {'sizes': []}, {'sizes': {'size': 1}},
              {'sizes': {'size': [1]}}, {'sizes': {'size': [{}]}}]:
        data = d
        with errmsg('Unexpected data from Flickr.\n'):
            walls.run(config_obj)


def test_run_empty_search(monkeypatch, config_obj, errmsg):
    """An empty Flickr search result must exit with a friendly message."""
    monkeypatch.setattr('flickrapi.FlickrAPI.walk', lambda self, **kw: [])
    with errmsg('No matching photos found.\n'):
        walls.run(config_obj)


def test_run_bad_request(monkeypatch, config_obj, errmsg):
    """A network failure during download must exit with a clean message."""
    def raise_function(*a, **kw):
        raise IOError()
    monkeypatch.setattr('requests.get', raise_function)
    monkeypatch.setattr('flickrapi.FlickrAPI.walk',
                        lambda self, **kw: [{'id': 1}])
    monkeypatch.setattr('walls.smallest_url', lambda *a: 'http://example.com')
    walls.first_photo = lambda: 'url'
    with errmsg('Error downloading image.\n'):
        walls.run(config_obj)


def test_main(monkeypatch, config, errmsg):
    """Check that the arg parsing all works."""
    # c[0] records whether clear_dir was invoked (list so the closure can
    # mutate it on Python 2 without ``nonlocal``).
    c = [False]
    def set_clear(*a):
        """Remember that clear was run."""
        c[0] = True
    def my_expanduser(path):
        if path == '~/.wallsrc':
            return config
        return path
    monkeypatch.setattr('os.path.expanduser', my_expanduser)
    monkeypatch.setattr('flickrapi.FlickrAPI.walk', lambda self, **kw: [])
    monkeypatch.setattr('walls.clear_dir', set_clear)
    # Without -c/--clear the directory must NOT be cleared.
    with errmsg('No matching photos found.\n'):
        walls.main(['walls'])
    assert not c[0]
    # Every flag/positional ordering must trigger the clear.
    for args in [['-c'], ['--clear'], ['-c', config], [config, '-c'],
                 ['--clear', config], [config, '--clear']]:
        with errmsg('No matching photos found.\n'):
            walls.main(['walls'] + args)
        assert c[0]
        # Reset clear
        c[0] = False


def test_download(monkeypatch, tmpdir):
    """download() streams the response body into the target directory."""
    resp = FakeResponse()
    resp.status_code = 200
    # FakeResponse.__call__ returns itself, so the instance can stand in
    # for requests.get directly.
    monkeypatch.setattr('requests.get', resp)
    walls.download('file.txt', str(tmpdir))
    assert tmpdir.join('file.txt').read() == 'this is the data'


def test_download_status(monkeypatch, tmpdir):
    """A non-success HTTP status must raise IOError."""
    resp = FakeResponse()
    resp.status_code = 418
    monkeypatch.setattr('requests.get', resp)
    with pytest.raises(IOError):
        walls.download('file.txt', str(tmpdir))


def test_download_real_status(monkeypatch, tmpdir):
    """A real (refused) connection must also surface as IOError."""
    with pytest.raises(IOError):
        walls.download('http://0.0.0.0:1234', str(tmpdir))
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 12 12:48:33 2014
@author: ibackus
"""
# External modules
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import os
import cPickle as pickle
# ICgen modules
import calc_rho_zr
import calc_temp
import pos_class
import make_snapshot
import ICgen_settings
import make_sigma
import isaac
# Initial stuff
# Absolute directory containing this module — presumably used by other
# ICgen modules to locate bundled files; not referenced in this file.
ICgenDir = os.path.dirname(os.path.realpath(__file__))
class IC:
    """
    Defines the IC class — the container object for building a set of
    initial conditions (Python 2 code).

    GENERATING NEW INITIAL CONDITIONS

    # Generate IC object from 1-D SimArrays r, sigma (surface density)
    IC = ICgen.IC(r, sigma)
    """
    def __init__(self, r, sigma):
        # Initialize
        # Load up default settings
        self.settings = ICgen_settings.settings()
        # Add modules/attributes
        self.T = calc_temp.T(self)
        self.maker = maker(self)
        self.add = add(self)
        # Generate sigma spline interpolation
        self.maker.sigma_gen(r, sigma)
        # Define a saving function
        def saver(filename = None):
            """
            A wrapper for ICgen.save
            """
            save(self, filename)
        # saver closes over self, so IC.save() works with no arguments.
        self.save = saver
    def generate(self, restart=False):
        """
        Runs through all the steps to generate a set of initial conditions

        IF restart=True, it picks up at the last completed step
        """
        if restart:
            # Find the last completed step.  Having 'pos' implies rho was
            # already generated, so steps 1-2 can be skipped.
            if hasattr(self, 'pos'): initial_step = 3
            elif hasattr(self, 'rho'): initial_step = 2
            else: initial_step = 1
        else:
            initial_step = 1
            self.save()
        if initial_step <= 1:
            # Generate rho
            self.maker.rho_gen()
            self.save()
        if initial_step <= 2:
            # Generate positions
            self.maker.pos_gen()
            self.save()
        if initial_step <= 3:
            # Generate snapshot
            self.maker.snapshot_gen()
            self.save()
def save(ICobj, filename=None):
    """
    Pickle everything available on ICobj (settings, rho, sigma, pos,
    param dict, snapshot file name) to *filename*.

    filename defaults to ICobj.settings.filenames.IC_file_name.  When
    present, the .param file and the tipsy snapshot are also written to
    their own configured file names.
    """
    if filename is None:
        filename = ICobj.settings.filenames.IC_file_name
    save_dict = {}
    # --------------------------------------------------
    # GET SETTINGS
    # --------------------------------------------------
    save_dict['settings'] = ICobj.settings
    # --------------------------------------------------
    # Prepare rho, if available
    # --------------------------------------------------
    if hasattr(ICobj, 'rho'):
        rho = ICobj.rho
        # Generate a dictionary containing rho_binned, z_bins, r_bins
        rho_dict = {\
        'rho': rho.rho_binned,\
        'z': rho.z_bins,\
        'r': rho.r_bins}
        # Update save dictionary
        save_dict['rho'] = rho_dict
    # --------------------------------------------------
    # Prepare sigma, if available
    # --------------------------------------------------
    if hasattr(ICobj, 'sigma'):
        sigma = ICobj.sigma
        # Update save dictionary (only the raw inputs are pickled; the
        # spline is rebuilt on load)
        save_dict['sigma'] = sigma.input_dict
    # --------------------------------------------------
    # Prepare pos if possible
    # --------------------------------------------------
    if hasattr(ICobj, 'pos'):
        save_dict['pos'] = ICobj.pos
    # --------------------------------------------------
    # Prepare param if possible
    # --------------------------------------------------
    if hasattr(ICobj, 'snapshot_param'):
        save_dict['snapshot_param'] = ICobj.snapshot_param
        param_name = ICobj.settings.filenames.paramName
        isaac.configsave(ICobj.snapshot_param, param_name)
        print 'param file saved to {}'.format(param_name)
    # --------------------------------------------------
    # SAVE
    # --------------------------------------------------
    # Save snapshot if possible (only its file name goes in the pickle)
    if hasattr(ICobj, 'snapshot'):
        fmt = pynbody.tipsy.TipsySnap
        fname = ICobj.settings.filenames.snapshotName
        save_dict['snapshotName'] = fname
        ICobj.snapshot.write(fmt = fmt, filename = fname)
    # Save the save dictionary
    pickle.dump(save_dict,open(filename,'wb'))
    print 'Initial conditions saved to {}'.format(filename)


def load(filename):
    """
    Re-create an IC object from a pickle written by save().

    sigma/r must be present in the pickle; everything else (settings,
    rho, pos, snapshot, param) is restored when available.
    """
    # Load everything available from filename
    input_dict = pickle.load(open(filename,'rb'))
    sigma = input_dict['sigma']['sigma']
    r = input_dict['sigma']['r']
    # Constructing IC rebuilds the sigma spline from the raw inputs
    ICobj = IC(r, sigma)
    # Parse the input dictionary
    if 'settings' in input_dict:
        print 'loading settings'
        ICobj.settings = input_dict['settings']
    if 'rho' in input_dict:
        print 'loading rho'
        ICobj.add.rho(input_dict['rho'])
    if 'pos' in input_dict:
        print 'loading pos'
        ICobj.pos = input_dict['pos']
    if 'snapshotName' in input_dict:
        print 'loading snapshot'
        fname = input_dict['snapshotName']
        ICobj.snapshot = pynbody.load(fname)
    if 'snapshot_param' in input_dict:
        print 'loading param'
        ICobj.snapshot_param = input_dict['snapshot_param']
    return ICobj
class add:
    """
    Contains modules to load data/parameters onto an existing IC object.
    """
    def __init__(self, ICobj):
        # Keep a back-reference to the owning IC instance.
        self._parent = ICobj
    def rho(self,rho_dict):
        """
        Generates a rho object and stores it in ICobj.rho

        rho_dict should be a dictionary containing:
        'z': 1D array of z values
        'r': 1D array of r values
        'rho': 2D array of rho evaluated at z,r

        Example:
        rho_dict = pickle.load(open('rhofile.p', 'rb')) # Load up a rho dict
        ICobj.add.rho(rho_dict) # create ICobj.rho
        """
        # Create rho object (includes a spline interpolation)
        rho_binned = rho_dict['rho']
        z_bins = rho_dict['z']
        r_bins = rho_dict['r']
        self._parent.rho = calc_rho_zr.rho_from_array(self._parent, rho_binned, z_bins, r_bins)
        print 'rho stored in <IC instance>.rho'
class maker:
    """
    A Wrapper containing various functions for generating initial conditions.

    Outputs of the functions are saved to the IC object. The IC object is
    referenced as self._parent. So to access temperature, simply call
    self._parent.T(r)
    """
    def __init__(self, ICobj):
        self._parent = ICobj
    def sigma_gen(self, r, sigma):
        """
        A Wrapper for make_sigma.sigma_gen

        See make_sigma.sigma_gen for documentation

        Upon executing, generates sigma, pdf, and cdf_inv and saves to ICobj

        USAGE:

        ICobj.maker.sigma_gen(r, sigma)

        r and sigma should be 1-D SimArrays. sigma is the surface density
        evaluated at r
        """
        # Generate sigma
        sigma = make_sigma.sigma_gen(r, sigma)
        # Copy sigma to the parent (IC) object
        self._parent.sigma = sigma
        print 'Sigma stored in <IC instance>.sigma'
    def rho_gen(self):
        """
        A wrapper for calc_rho_zr.

        Upon executing, generates rho and rho cdf inverse
        """
        # Check that sigma has been generated (rho depends on it)
        if not hasattr(self._parent, 'sigma'):
            raise RuntimeError,'Must load/generate sigma before calculating rho'
        # Numerically calculate rho(z,r) for a given sigma. rho(z,r)
        # obeys vertical hydrostatic equilibrium (approximately)
        rho_array, z, r = calc_rho_zr.rho_zr(self._parent)
        # Create a complete rho object. Includes rho spline and CDF inverse
        rho = calc_rho_zr.rho_from_array(self._parent, rho_array, z, r)
        # Save to ICobj
        self._parent.rho = rho
        print 'rho stored in <IC instance>.rho'
    def pos_gen(self, method = None):
        """
        A wrapper for generating positions according to rho and sigma

        Initializes a pos object (see pos_class.py) and saves it to ICobj.pos

        IF called with method not set, the method used is:
        ICobj.settings.pos_gen.method
        """
        # Generate positions object
        pos = pos_class.pos(self._parent, method)
        # Save it to ICobj
        self._parent.pos = pos
    def snapshot_gen(self):
        """
        A wrapper for generating a tipsy snapshot from the initial conditions

        Uses make_snapshot.py
        """
        # Generate snapshot
        snapshot, snapshot_param = make_snapshot.snapshot_gen(self._parent)
        # Save to ICobj
        self._parent.snapshot = snapshot
        self._parent.snapshot_param = snapshot_param
|
|
#
# ReportLab QRCode widget
#
# Ported from the Javascript library QRCode for Javascript by Sam Curren
#
# URL: http://www.d-project.com/
# http://d-project.googlecode.com/svn/trunk/misc/qrcode/js/qrcode.js
# qrcode.js is copyright (c) 2009 Kazuhiko Arase
#
# Original ReportLab module by German M. Bravo
#
# modified and improved by Anders Hammarquist <iko@openend.se>
# and used with permission under the ReportLab License
#
# The word "QR Code" is registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
__all__ = ('QrCodeWidget')
import itertools
from reportlab.platypus.flowables import Flowable
from reportlab.graphics.shapes import Group, Rect
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isNumberOrNone, isColor, isString, Validator
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.graphics.widgetbase import Widget
from reportlab.lib.units import mm
# Prefer the helpers shipped with ReportLab 3.x; fall back to local
# Python 2-only definitions when running against ReportLab 2.x.
try:
    from reportlab.lib.utils import asUnicodeEx, isUnicode
except ImportError:
    # ReportLab 2.x compatibility
    def asUnicodeEx(v, enc='utf8'):
        """Coerce *v* to unicode, decoding byte strings with *enc* (Py2)."""
        if isinstance(v, unicode):
            return v
        if isinstance(v, str):
            return v.decode(enc)
        # Fall back to the object's str() form, then decode it.
        return str(v).decode(enc)

    def isUnicode(v):
        """Return True when *v* is a unicode instance (Py2)."""
        return isinstance(v, unicode)
from reportlab.graphics.barcode import qrencoder
class isLevel(Validator):
    """Validator accepting exactly the four QR error-correction levels."""

    def test(self, x):
        # Same membership check as before, spelled with a tuple literal.
        return x in ('L', 'M', 'Q', 'H')

# Replace the class with a singleton instance, the reportlab validator idiom.
isLevel = isLevel()
class isUnicodeOrQRList(Validator):
    """Validator for QR payloads: unicode text, or a list of qrencoder.QR
    objects (mixed-mode encoding)."""
    def _test(self, x):
        # Accept unicode outright, or any iterable made up solely of QR parts.
        if isUnicode(x):
            return True
        if all(isinstance(v, qrencoder.QR) for v in x):
            return True
        return False
    def test(self, x):
        return self._test(x) or self.normalizeTest(x)
    def normalize(self, x):
        # Pass valid values through unchanged; otherwise try a unicode
        # coercion and surface failures as ValueError.
        if self._test(x):
            return x
        try:
            return asUnicodeEx(x)
        except UnicodeError:
            raise ValueError("Can't convert to unicode: %r" % x)

# Replace the class with a singleton instance, the reportlab validator idiom.
isUnicodeOrQRList = isUnicodeOrQRList()
class SRect(Rect):
    """A Rect drawn as a solid filled rectangle with no stroke/outline."""
    def __init__(self, x, y, width, height, fillColor=colors.black):
        Rect.__init__(self, x, y, width, height, fillColor=fillColor,
                      strokeColor=None, strokeWidth=0)
class QrCodeWidget(Widget):
    """ReportLab graphics widget rendering its ``value`` as a QR code."""
    codeName = "QR"
    _attrMap = AttrMap(
        BASE = Widget,
        value = AttrMapValue(isUnicodeOrQRList, desc='QRCode data'),
        x = AttrMapValue(isNumber, desc='x-coord'),
        y = AttrMapValue(isNumber, desc='y-coord'),
        barFillColor = AttrMapValue(isColor, desc='bar color'),
        barWidth = AttrMapValue(isNumber, desc='Width of bars.'), # maybe should be named just width?
        barHeight = AttrMapValue(isNumber, desc='Height of bars.'), # maybe should be named just height?
        barBorder = AttrMapValue(isNumber, desc='Width of QR border.'), # maybe should be named qrBorder?
        barLevel = AttrMapValue(isLevel, desc='QR Code level.'), # maybe should be named qrLevel
        qrVersion = AttrMapValue(isNumberOrNone, desc='QR Code version. None for auto'),
        # Below are ignored, they make no sense
        barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
        barStrokeColor = AttrMapValue(isColor, desc='Color of bar borders.'),
        )
    # Class-level defaults; each may be overridden via __init__ kwargs.
    x = 0
    y = 0
    barFillColor = colors.black
    barStrokeColor = None
    barStrokeWidth = 0
    barHeight = 32*mm
    barWidth = 32*mm
    barBorder = 4
    barLevel = 'L'
    qrVersion = None
    value = None
    def __init__(self, value='Hello World', **kw):
        """Normalize *value*, apply kwargs, and build the QRCode model."""
        self.value = isUnicodeOrQRList.normalize(value)
        for k, v in kw.items():
            setattr(self, k, v)
        ec_level = getattr(qrencoder.QRErrorCorrectLevel, self.barLevel)
        # Write straight into __dict__ to bypass the widget attribute
        # validation machinery for this internal, non-declared attribute.
        self.__dict__['qr'] = qrencoder.QRCode(self.qrVersion, ec_level)
        if isUnicode(self.value):
            self.addData(self.value)
        elif self.value:
            # A list of qrencoder.QR parts: add each one separately.
            for v in self.value:
                self.addData(v)
    def addData(self, value):
        """Append *value* to the QR payload."""
        self.qr.addData(value)
    def draw(self):
        """Return a Group of rectangles representing the QR modules."""
        self.qr.make()
        g = Group()
        # NOTE(review): ``color`` is never used below; the module SRects
        # fall back to their default black rather than barFillColor —
        # confirm whether that is intentional.
        color = self.barFillColor
        border = self.barBorder
        width = self.barWidth
        height = self.barHeight
        x = self.x
        y = self.y
        # Transparent background rect spanning the full widget area.
        g.add(SRect(x, y, width, height, fillColor=None))
        moduleCount = self.qr.getModuleCount()
        # The (square) code is fitted into the smaller of width/height and
        # centered horizontally.  NOTE(review): offsetY uses (minwh - height)
        # rather than the mirrored (height - minwh) — presumably to anchor
        # vertically given the top-down y below; verify for non-square sizes.
        minwh = float(min(width, height))
        boxsize = minwh / (moduleCount + border * 2.0)
        offsetX = x + (width - minwh) / 2.0
        offsetY = y + (minwh - height) / 2.0
        for r, row in enumerate(self.qr.modules):
            row = map(bool, row)
            c = 0
            # groupby merges runs of consecutive dark modules so each run
            # is drawn as a single rectangle.
            for t, tt in itertools.groupby(row):
                isDark = t
                count = len(list(tt))
                if isDark:
                    x = (c + border) * boxsize
                    y = (r + border + 1) * boxsize
                    s = SRect(offsetX + x, offsetY + height - y, count * boxsize, boxsize)
                    g.add(s)
                c += count
        return g
# Flowable version
class QrCode(Flowable):
    """Platypus Flowable that draws its ``value`` as a QR code directly on
    the canvas."""
    # Class-level defaults; each may be overridden via __init__ kwargs.
    height = 32*mm
    width = 32*mm
    qrBorder = 4
    qrLevel = 'L'
    qrVersion = None
    value = None
    def __init__(self, value=None, **kw):
        """Normalize *value*, apply kwargs, and build the QRCode model."""
        self.value = isUnicodeOrQRList.normalize(value)
        for k, v in kw.items():
            setattr(self, k, v)
        ec_level = getattr(qrencoder.QRErrorCorrectLevel, self.qrLevel)
        self.qr = qrencoder.QRCode(self.qrVersion, ec_level)
        if isUnicode(self.value):
            self.addData(self.value)
        elif self.value:
            # A list of qrencoder.QR parts: add each one separately.
            for v in self.value:
                self.addData(v)
    def addData(self, value):
        """Append *value* to the QR payload."""
        self.qr.addData(value)
    def draw(self):
        """Render the QR modules as filled rectangles on self.canv."""
        self.qr.make()
        moduleCount = self.qr.getModuleCount()
        border = self.qrBorder
        xsize = self.width / (moduleCount + border * 2.0)
        ysize = self.height / (moduleCount + border * 2.0)
        for r, row in enumerate(self.qr.modules):
            row = map(bool, row)
            c = 0
            # groupby merges runs of consecutive dark modules into one rect.
            for t, tt in itertools.groupby(row):
                isDark = t
                count = len(list(tt))
                if isDark:
                    x = (c + border) * xsize
                    y = self.height - (r + border + 1) * ysize
                    # The 1.05 height factor slightly overlaps adjacent
                    # rows — presumably to avoid hairline gaps; confirm.
                    self.rect(x, y, count * xsize, ysize * 1.05)
                c += count
    def rect(self, x, y, w, h):
        # Thin wrapper: filled, stroke-less rectangle on the canvas.
        self.canv.rect(x, y, w, h, stroke=0, fill=1)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
import logging
from django.conf import settings # noqa
from django.forms import ValidationError # noqa
from django.forms.widgets import HiddenInput # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateImageForm(forms.SelfHandlingForm):
    """Form for creating a Glance image from an external URL or an
    uploaded file (upload availability is controlled by the
    HORIZON_IMAGES_ALLOW_UPLOAD setting)."""
    name = forms.CharField(max_length="255", label=_("Name"), required=True)
    description = forms.CharField(widget=forms.widgets.Textarea(),
                                  label=_("Description"),
                                  required=False)
    source_type = forms.ChoiceField(
        label=_('Image Source'),
        choices=[('url', _('Image Location')),
                 ('file', _('Image File'))],
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'source'}))
    copy_from = forms.CharField(max_length="255",
                                label=_("Image Location"),
                                help_text=_("An external (HTTP) URL to load "
                                            "the image from."),
                                widget=forms.TextInput(attrs={
                                    'class': 'switched',
                                    'data-switch-on': 'source',
                                    'data-source-url': _('Image Location')}),
                                required=False)
    image_file = forms.FileField(label=_("Image File"),
                                 help_text=_("A local image to upload."),
                                 widget=forms.FileInput(attrs={
                                     'class': 'switched',
                                     'data-switch-on': 'source',
                                     'data-source-file': _('Image File')}),
                                 required=False)
    disk_format = forms.ChoiceField(label=_('Format'),
                                    required=True,
                                    choices=[('', ''),
                                             ('aki',
                                              _('AKI - Amazon Kernel '
                                                'Image')),
                                             ('ami',
                                              _('AMI - Amazon Machine '
                                                'Image')),
                                             ('ari',
                                              _('ARI - Amazon Ramdisk '
                                                'Image')),
                                             ('iso',
                                              _('ISO - Optical Disk Image')),
                                             ('qcow2',
                                              _('QCOW2 - QEMU Emulator')),
                                             ('raw', 'Raw'),
                                             ('vdi', 'VDI'),
                                             ('vhd', 'VHD'),
                                             ('vmdk', 'VMDK')],
                                    widget=forms.Select(attrs={'class':
                                                               'switchable'}))
    minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"),
                                      help_text=_('The minimum disk size'
                                                  ' required to boot the'
                                                  ' image. If unspecified, this'
                                                  ' value defaults to 0'
                                                  ' (no minimum).'),
                                      required=False)
    # BUG FIX: the help text previously read "minimum disk size" — a
    # copy-paste from minimum_disk — although this field is the RAM
    # requirement.
    minimum_ram = forms.IntegerField(label=_("Minimum Ram (MB)"),
                                     help_text=_('The minimum memory size'
                                                 ' required to boot the'
                                                 ' image. If unspecified, this'
                                                 ' value defaults to 0 (no'
                                                 ' minimum).'),
                                     required=False)
    is_public = forms.BooleanField(label=_("Public"), required=False)
    protected = forms.BooleanField(label=_("Protected"), required=False)

    def __init__(self, *args, **kwargs):
        super(CreateImageForm, self).__init__(*args, **kwargs)
        # Hide the upload widget entirely when uploads are disabled.
        if not settings.HORIZON_IMAGES_ALLOW_UPLOAD:
            self.fields['image_file'].widget = HiddenInput()

    def clean(self):
        """Require exactly one image source: a URL or an uploaded file."""
        data = super(CreateImageForm, self).clean()
        if not data['copy_from'] and not data['image_file']:
            raise ValidationError(
                _("A image or external image location must be specified."))
        elif data['copy_from'] and data['image_file']:
            raise ValidationError(
                _("Can not specify both image and external image location."))
        else:
            return data

    def handle(self, request, data):
        """Create the image in Glance; return it, or delegate errors to
        horizon's exception handler."""
        # Glance does not really do anything with container_format at the
        # moment. It requires it is set to the same disk_format for the three
        # Amazon image types, otherwise it just treats them as 'bare.' As such
        # we will just set that to be that here instead of bothering the user
        # with asking them for information we can already determine.
        if data['disk_format'] in ('ami', 'aki', 'ari',):
            container_format = data['disk_format']
        else:
            container_format = 'bare'
        meta = {'is_public': data['is_public'],
                'protected': data['protected'],
                'disk_format': data['disk_format'],
                'container_format': container_format,
                'min_disk': (data['minimum_disk'] or 0),
                'min_ram': (data['minimum_ram'] or 0),
                'name': data['name'],
                'properties': {}}
        if data['description']:
            meta['properties']['description'] = data['description']
        # Prefer the uploaded file when uploads are enabled and one was
        # provided; otherwise fall back to the external URL.
        if settings.HORIZON_IMAGES_ALLOW_UPLOAD and data['image_file']:
            meta['data'] = self.files['image_file']
        else:
            meta['copy_from'] = data['copy_from']
        try:
            image = api.glance.image_create(request, **meta)
            messages.success(request,
                             _('Your image %s has been queued for creation.') %
                             data['name'])
            return image
        except Exception:
            exceptions.handle(request, _('Unable to create new image.'))
class UpdateImageForm(forms.SelfHandlingForm):
    """Form for editing an existing Glance image's metadata.

    Kernel/ramdisk/architecture/format are shown read-only; only name,
    description, public and protected are effectively user-editable.
    """
    image_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(max_length="255", label=_("Name"))
    description = forms.CharField(widget=forms.widgets.Textarea(),
                                  label=_("Description"),
                                  required=False)
    kernel = forms.CharField(max_length="36", label=_("Kernel ID"),
                             required=False,
                             widget=forms.TextInput(
                                 attrs={'readonly': 'readonly'}
                             ))
    ramdisk = forms.CharField(max_length="36", label=_("Ramdisk ID"),
                              required=False,
                              widget=forms.TextInput(
                                  attrs={'readonly': 'readonly'}
                              ))
    architecture = forms.CharField(label=_("Architecture"), required=False,
                                   widget=forms.TextInput(
                                       attrs={'readonly': 'readonly'}
                                   ))
    disk_format = forms.CharField(label=_("Format"),
                                  widget=forms.TextInput(
                                      attrs={'readonly': 'readonly'}
                                  ))
    public = forms.BooleanField(label=_("Public"), required=False)
    protected = forms.BooleanField(label=_("Protected"), required=False)

    def handle(self, request, data):
        """Apply the metadata update via Glance; return the updated image."""
        image_id = data['image_id']
        error_updating = _('Unable to update image "%s".')
        # Amazon image types must keep container_format == disk_format;
        # everything else is treated as 'bare' (mirrors CreateImageForm).
        if data['disk_format'] in ['aki', 'ari', 'ami']:
            container_format = data['disk_format']
        else:
            container_format = 'bare'
        meta = {'is_public': data['public'],
                'protected': data['protected'],
                'disk_format': data['disk_format'],
                'container_format': container_format,
                'name': data['name'],
                'properties': {}}
        if data['description']:
            meta['properties']['description'] = data['description']
        if data['kernel']:
            meta['properties']['kernel_id'] = data['kernel']
        if data['ramdisk']:
            meta['properties']['ramdisk_id'] = data['ramdisk']
        if data['architecture']:
            meta['properties']['architecture'] = data['architecture']
        # Ensure we do not delete properties that have already been
        # set on an image.
        meta['purge_props'] = False
        try:
            image = api.glance.image_update(request, image_id, **meta)
            messages.success(request, _('Image was successfully updated.'))
            return image
        except Exception:
            exceptions.handle(request, error_updating % image_id)
|
|
"""Module for interactive demos using IPython.
This module implements a few classes for running Python scripts interactively
in IPython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to IPython.
Provided classes
================
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by IPython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: IPython version of the LineDemo class (the demo is
executed a line at a time, but processed via IPython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Subclassing
===========
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
=========
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global IPython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where IPython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
Make a block execute automatically, but still being printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different than calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via:
from IPython.demo import Demo
d = Demo('ex_demo.py')
d() <--- Call the d object (omit the parens if you have autocall set to 2).
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Note: To make this simpler to explore, a file called "demo-exercizer.py" has
been added to the "docs/examples/core" directory. Just cd to this directory in
an IPython session, and type::
%run demo-exercizer.py
and then follow the directions.
Example
=======
The following is a very simple example of a valid demo file.
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of IPython's Demo class.'''
print 'Hello, welcome to an interactive IPython demo.'
# The mark below defines a block boundary, which is a point where IPython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below marks this block as silent
# <demo> silent
print 'This is a silent block, which gets executed but not printed.'
# <demo> stop
# <demo> auto
print 'This is an automatic block.'
print 'It is executed without asking for confirmation, but printed.'
z = x+y
print 'z=',z
# <demo> stop
# This is just another normal block.
print 'z is now:', z
print 'bye!'
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
import exceptions
import os
import re
import shlex
import sys
from IPython.PyColorize import Parser
from IPython.genutils import marquee, file_read, file_readlines, Term
__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
class DemoError(exceptions.Exception): pass
def re_mark(mark):
    """Compile a MULTILINE regex matching a '# <demo> <mark>' tag line.

    The returned pattern matches a whole line consisting of optional
    leading whitespace, a '#', the literal '<demo>' tag, and *mark*
    (which may itself be a regex fragment).
    """
    pattern = r'^\s*#\s+<demo>\s+%s\s*$' % mark
    return re.compile(pattern, re.MULTILINE)
class Demo(object):
    """Run a marked-up Python script as an interactive, block-wise demo.

    See the module docstring for the markup language.  NOTE: this is
    Python 2 code (``print >>``, ``raw_input``, ``exec ... in``).
    """
    # Regexes recognizing the '# <demo> ...' markup tags in the source.
    re_stop = re_mark('-*\s?stop\s?-*')
    re_silent = re_mark('silent')
    re_auto = re_mark('auto')
    re_auto_all = re_mark('auto_all')
    def __init__(self,src,title='',arg_str='',auto_all=None):
        """Make a new demo object. To run the demo, simply call the object.
        See the module docstring for full details and an example (you can use
        IPython.Demo? in IPython to see it).
        Inputs:
        - src is either a file, or file-like object, or a
        string that can be resolved to a filename.
        Optional inputs:
        - title: a string to use as the demo name. Of most use when the demo
        you are making comes from an object that has no filename, or if you
        want an alternate denotation distinct from the filename.
        - arg_str(''): a string of arguments, internally converted to a list
        just like sys.argv, so the demo script can see a similar
        environment.
        - auto_all(None): global flag to run all blocks automatically without
        confirmation. This attribute overrides the block-level tags and
        applies to the whole demo. It is an attribute of the object, and
        can be changed at runtime simply by reassigning it to a boolean
        value.
        """
        if hasattr(src, "read"):
            # It seems to be a file or a file-like object
            self.fname = "from a file-like object"
            if title == '':
                self.title = "from a file-like object"
            else:
                self.title = title
        else:
            # Assume it's a string or something that can be converted to one
            self.fname = src
            if title == '':
                (filepath, filename) = os.path.split(src)
                self.title = filename
            else:
                self.title = title
        self.sys_argv = [src] + shlex.split(arg_str)
        self.auto_all = auto_all
        self.src = src
        # get a few things from ipython. While it's a bit ugly design-wise,
        # it ensures that things like color scheme and the like are always in
        # sync with the ipython mode being used. This class is only meant to
        # be used inside ipython anyways, so it's OK.
        # NOTE: __IPYTHON__ is a global injected by the running IPython shell.
        self.ip_ns = __IPYTHON__.user_ns
        self.ip_colorize = __IPYTHON__.pycolorize
        self.ip_showtb = __IPYTHON__.showtraceback
        self.ip_runlines = __IPYTHON__.runlines
        self.shell = __IPYTHON__
        # load user data and initialize data structures
        self.reload()
    def fload(self):
        """Load file object."""
        # read data and parse into blocks
        # Close any previously opened file before (re)loading.
        if hasattr(self, 'fobj') and self.fobj is not None:
            self.fobj.close()
        if hasattr(self.src, "read"):
            # It seems to be a file or a file-like object
            self.fobj = self.src
        else:
            # Assume it's a string or something that can be converted to one
            self.fobj = open(self.fname)
    def reload(self):
        """Reload source from disk and initialize state."""
        self.fload()
        self.src = self.fobj.read()
        # Split the source on '<demo> stop' tags into per-block strings.
        src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
        self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
        self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
        # if auto_all is not given (def. None), we read it from the file
        if self.auto_all is None:
            self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
        else:
            self.auto_all = bool(self.auto_all)
        # Clean the sources from all markup so it doesn't get displayed when
        # running the demo
        src_blocks = []
        auto_strip = lambda s: self.re_auto.sub('',s)
        for i,b in enumerate(src_b):
            if self._auto[i]:
                src_blocks.append(auto_strip(b))
            else:
                src_blocks.append(b)
        # remove the auto_all marker
        src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
        self.nblocks = len(src_blocks)
        self.src_blocks = src_blocks
        # also build syntax-highlighted source
        self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)
        # ensure clean namespace and seek offset
        self.reset()
    def reset(self):
        """Reset the namespace and seek pointer to restart the demo"""
        self.user_ns = {}
        self.finished = False
        self.block_index = 0
    def _validate_index(self,index):
        """Raise ValueError if index is outside [0, nblocks)."""
        if index<0 or index>=self.nblocks:
            raise ValueError('invalid block index %s' % index)
    def _get_index(self,index):
        """Get the current block index, validating and checking status.
        Returns None if the demo is finished"""
        if index is None:
            if self.finished:
                print >>Term.cout, 'Demo finished. Use <demo_name>.reset() if you want to rerun it.'
                return None
            index = self.block_index
        else:
            self._validate_index(index)
        return index
    def seek(self,index):
        """Move the current seek pointer to the given block.
        You can use negative indices to seek from the end, with identical
        semantics to those of Python lists."""
        if index<0:
            index = self.nblocks + index
        self._validate_index(index)
        self.block_index = index
        self.finished = False
    def back(self,num=1):
        """Move the seek pointer back num blocks (default is 1)."""
        self.seek(self.block_index-num)
    def jump(self,num=1):
        """Jump a given number of blocks relative to the current one.
        The offset can be positive or negative, defaults to 1."""
        self.seek(self.block_index+num)
    def again(self):
        """Move the seek pointer back one block and re-execute."""
        self.back(1)
        self()
    def edit(self,index=None):
        """Edit a block.
        If no number is given, use the last block executed.
        This edits the in-memory copy of the demo, it does NOT modify the
        original source file. If you want to do that, simply open the file in
        an editor and use reload() when you make changes to the file. This
        method is meant to let you change a block during a demonstration for
        explanatory purposes, without damaging your original script."""
        index = self._get_index(index)
        if index is None:
            return
        # decrease the index by one (unless we're at the very beginning), so
        # that the default demo.edit() call opens up the block we've last run
        if index>0:
            index -= 1
        filename = self.shell.mktempfile(self.src_blocks[index])
        self.shell.hooks.editor(filename,1)
        new_block = file_read(filename)
        # update the source and colored block
        self.src_blocks[index] = new_block
        self.src_blocks_colored[index] = self.ip_colorize(new_block)
        self.block_index = index
        # call to run with the newly edited index
        self()
    def show(self,index=None):
        """Show a single block on screen"""
        index = self._get_index(index)
        if index is None:
            return
        print >>Term.cout, self.marquee('<%s> block # %s (%s remaining)' %
                                        (self.title,index,self.nblocks-index-1))
        print >>Term.cout,(self.src_blocks_colored[index])
        sys.stdout.flush()
    def show_all(self):
        """Show entire demo on screen, block by block"""
        fname = self.title
        title = self.title
        nblocks = self.nblocks
        silent = self._silent
        marquee = self.marquee
        for index,block in enumerate(self.src_blocks_colored):
            if silent[index]:
                print >>Term.cout, marquee('<%s> SILENT block # %s (%s remaining)' %
                                           (title,index,nblocks-index-1))
            else:
                print >>Term.cout, marquee('<%s> block # %s (%s remaining)' %
                                           (title,index,nblocks-index-1))
            print >>Term.cout, block,
        sys.stdout.flush()
    def runlines(self,source):
        """Execute a string with one or more lines of code"""
        # Python 2 exec statement: run the block in the demo's private
        # namespace so state accumulates across blocks.
        exec source in self.user_ns
    def __call__(self,index=None):
        """run a block of the demo.
        If index is given, it should be an integer >=1 and <= nblocks. This
        means that the calling convention is one off from typical Python
        lists. The reason for the inconsistency is that the demo always
        prints 'Block n/N, and N is the total, so it would be very odd to use
        zero-indexing here."""
        index = self._get_index(index)
        if index is None:
            return
        try:
            marquee = self.marquee
            next_block = self.src_blocks[index]
            self.block_index += 1
            if self._silent[index]:
                print >>Term.cout, marquee('Executing silent block # %s (%s remaining)' %
                                           (index,self.nblocks-index-1))
            else:
                self.pre_cmd()
                self.show(index)
                if self.auto_all or self._auto[index]:
                    print >>Term.cout, marquee('output:')
                else:
                    # Any non-empty answer aborts execution of this block.
                    print >>Term.cout, marquee('Press <q> to quit, <Enter> to execute...'),
                    ans = raw_input().strip()
                    if ans:
                        print >>Term.cout, marquee('Block NOT executed')
                        return
            try:
                # Temporarily swap in the demo's argv so the script sees
                # the arguments given at construction time.
                save_argv = sys.argv
                sys.argv = self.sys_argv
                self.runlines(next_block)
                self.post_cmd()
            finally:
                sys.argv = save_argv
        except:
            self.ip_showtb(filename=self.fname)
        else:
            # On success, publish the demo namespace into IPython's.
            self.ip_ns.update(self.user_ns)
        if self.block_index == self.nblocks:
            mq1 = self.marquee('END OF DEMO')
            if mq1:
                # avoid spurious print >>Term.cout,s if empty marquees are used
                print >>Term.cout
                print >>Term.cout, mq1
            print >>Term.cout, self.marquee('Use <demo_name>.reset() if you want to rerun it.')
            self.finished = True
    # These methods are meant to be overridden by subclasses who may wish to
    # customize the behavior of of their demos.
    def marquee(self,txt='',width=78,mark='*'):
        """Return the input string centered in a 'marquee'."""
        return marquee(txt,width,mark)
    def pre_cmd(self):
        """Method called before executing each block."""
        pass
    def post_cmd(self):
        """Method called after executing each block."""
        pass
class IPythonDemo(Demo):
    """Class for interactive demos with IPython's input processing applied.
    This subclasses Demo, but instead of executing each block by the Python
    interpreter (via exec), it actually calls IPython on it, so that any input
    filters which may be in place are applied to the input block.
    If you have an interactive environment which exposes special input
    processing, you can use this class instead to write demo scripts which
    operate exactly as if you had typed them interactively. The default Demo
    class requires the input to be valid, pure Python code.
    """
    def runlines(self,source):
        """Execute a string with one or more lines of code"""
        # Delegate to the shell so magics/prefilters apply.
        self.shell.runlines(source)
class LineDemo(Demo):
    """Demo whose source is run one line per block, with no markup.

    Every non-blank line of the input becomes its own block, executed
    automatically and non-silently, exactly as if typed at the prompt.
    Because blocks are single lines, the input may not contain any
    indentation (so no function definitions or compound statements).
    """
    def reload(self):
        """Reload source from disk and initialize state."""
        # Read the raw source and treat each non-blank line as a block.
        self.fload()
        raw_lines = self.fobj.readlines()
        blocks = [line for line in raw_lines if line.strip()]
        self.src = ''.join(raw_lines)
        block_count = len(blocks)
        # Line demos never pause or hide anything: every block is
        # automatic and visible.
        self._silent = [False] * block_count
        self._auto = [True] * block_count
        self.auto_all = True
        self.nblocks = block_count
        self.src_blocks = blocks
        # also build syntax-highlighted source
        self.src_blocks_colored = map(self.ip_colorize, self.src_blocks)
        # ensure clean namespace and seek offset
        self.reset()
# MRO gives this class IPythonDemo.runlines (shell-processed execution)
# combined with LineDemo.reload (one block per line, no markup).
class IPythonLineDemo(IPythonDemo,LineDemo):
    """Variant of the LineDemo class whose input is processed by IPython."""
    pass
class ClearMixin(object):
    """Use this mixin to make Demo classes with less visual clutter.
    Demos using this mixin will clear the screen before every block and use
    blank marquees.
    Note that in order for the methods defined here to actually override those
    of the classes it's mixed with, it must go /first/ in the inheritance
    tree. For example:
    class ClearIPDemo(ClearMixin,IPythonDemo): pass
    will provide an IPythonDemo class with the mixin's features.
    """
    def marquee(self,txt='',width=78,mark='*'):
        """Blank marquee that returns '' no matter what the input."""
        return ''
    def pre_cmd(self):
        """Method called before executing each block.
        This one simply clears the screen."""
        # Import here so the platform utilities are only pulled in when a
        # block actually executes.
        import IPython.platutils
        IPython.platutils.term_clear()
class ClearDemo(ClearMixin,Demo):
    """Demo subclass that clears the screen and uses blank marquees."""
    pass
class ClearIPDemo(ClearMixin,IPythonDemo):
    """IPythonDemo subclass that clears the screen and uses blank marquees."""
    pass
|
|
from _pydevd_bundle.pydevd_constants import dict_contains
import sys
from _pydevd_bundle import pydevd_xml
from os.path import basename
import traceback
try:
from urllib import quote, quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
#===================================================================================================
# print_var_node
#===================================================================================================
def print_var_node(xml_node, stream):
    """Write a one-line, human-readable description of a variable node.

    Reads the url-quoted 'name', 'value', 'type' and optional 'found_as'
    attributes from *xml_node* and writes them, unquoted, to *stream*.
    """
    labelled_attrs = (('Name: ', 'name'),
                      (', Value: ', 'value'),
                      (', Type: ', 'type'))
    for label, attr in labelled_attrs:
        stream.write(label)
        stream.write(unquote_plus(xml_node.getAttribute(attr)))
    found_as = xml_node.getAttribute('found_as')
    if found_as:
        stream.write(', Found as: %s' % (unquote_plus(found_as),))
    stream.write('\n')
#===================================================================================================
# print_referrers
#===================================================================================================
def print_referrers(obj, stream=None):
    """Print referrer information for *obj* to *stream* (default stdout).

    Calls get_referrer_info(), renders its XML result in a readable
    form, and returns the raw XML string.
    """
    if stream is None:
        stream = sys.stdout
    info_xml = get_referrer_info(obj)
    from xml.dom.minidom import parseString
    root = parseString(info_xml).getElementsByTagName('xml')[0]
    for node in root.childNodes:
        if node.nodeType == node.TEXT_NODE:
            continue
        tag = node.localName
        if tag == 'for':
            # The <for> element wraps the object we searched for.
            stream.write('Searching references for: ')
            for child in node.childNodes:
                if child.nodeType != node.TEXT_NODE:
                    print_var_node(child, stream)
        elif tag == 'var':
            stream.write('Referrer found: ')
            print_var_node(node, stream)
        else:
            sys.stderr.write('Unhandled node: %s\n' % (node,))
    return info_xml
#===================================================================================================
# get_referrer_info
#===================================================================================================
def get_referrer_info(searched_obj):
    """Return an XML string describing the objects that refer to *searched_obj*.

    The result is '<xml>...<for>searched obj</for>...referrers...</xml>'.
    When possible, each referrer carries a found_as attribute telling where
    inside the referrer the object was located (frame local name, dict key,
    or sequence index).  Errors never propagate: any failure is reported
    inside the returned XML instead.
    """
    DEBUG = 0
    if DEBUG:
        sys.stderr.write('Getting referrers info.\n')
    try:
        try:
            # None has a huge number of referrers; searching them is useless.
            if searched_obj is None:
                ret = ['<xml>\n']
                ret.append('<for>\n')
                ret.append(pydevd_xml.var_to_xml(
                    searched_obj,
                    'Skipping getting referrers for None',
                    additionalInXml=' id="%s"' % (id(searched_obj),)))
                ret.append('</for>\n')
                ret.append('</xml>')
                ret = ''.join(ret)
                return ret
            obj_id = id(searched_obj)
            try:
                if DEBUG:
                    sys.stderr.write('Getting referrers...\n')
                import gc
                referrers = gc.get_referrers(searched_obj)
            except:
                traceback.print_exc()
                ret = ['<xml>\n']
                ret.append('<for>\n')
                ret.append(pydevd_xml.var_to_xml(
                    searched_obj,
                    'Exception raised while trying to get_referrers.',
                    additionalInXml=' id="%s"' % (id(searched_obj),)))
                ret.append('</for>\n')
                ret.append('</xml>')
                ret = ''.join(ret)
                return ret
            if DEBUG:
                sys.stderr.write('Found %s referrers.\n' % (len(referrers),))
            curr_frame = sys._getframe()
            frame_type = type(curr_frame)
            #Ignore this frame and any caller frame of this frame
            ignore_frames = {} #Should be a set, but it's not available on all python versions.
            while curr_frame is not None:
                if basename(curr_frame.f_code.co_filename).startswith('pydev'):
                    ignore_frames[curr_frame] = 1
                curr_frame = curr_frame.f_back
            ret = ['<xml>\n']
            ret.append('<for>\n')
            if DEBUG:
                sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))
            ret.append(pydevd_xml.var_to_xml(
                searched_obj,
                'Referrers of obj with id="%s"' % (obj_id,)))
            ret.append('</for>\n')
            all_objects = None
            for r in referrers:
                try:
                    if dict_contains(ignore_frames, r):
                        continue #Skip the references we may add ourselves
                except:
                    pass #Ok: unhashable type checked...
                if r is referrers:
                    continue
                r_type = type(r)
                r_id = str(id(r))
                representation = str(r_type)
                found_as = ''
                if r_type == frame_type:
                    if DEBUG:
                        sys.stderr.write('Found frame referrer: %r\n' % (r,))
                    # Report under which local variable name the frame holds it.
                    for key, val in r.f_locals.items():
                        if val is searched_obj:
                            found_as = key
                            break
                elif r_type == dict:
                    if DEBUG:
                        sys.stderr.write('Found dict referrer: %r\n' % (r,))
                    # Try to check if it's a value in the dict (and under which key it was found)
                    for key, val in r.items():
                        if val is searched_obj:
                            found_as = key
                            if DEBUG:
                                sys.stderr.write(' Found as %r in dict\n' % (found_as,))
                            break
                    #Ok, there's one annoying thing: many times we find it in a dict from an instance,
                    #but with this we don't directly have the class, only the dict, so, to workaround that
                    #we iterate over all reachable objects and check if one of those has the given dict.
                    if all_objects is None:
                        all_objects = gc.get_objects()
                    for x in all_objects:
                        try:
                            if getattr(x, '__dict__', None) is r:
                                # Report the instance instead of its __dict__.
                                r = x
                                r_type = type(x)
                                r_id = str(id(r))
                                representation = str(r_type)
                                break
                        except:
                            pass #Just ignore any error here (i.e.: ReferenceError, etc.)
                elif r_type in (tuple, list):
                    if DEBUG:
                        sys.stderr.write('Found tuple referrer: %r\n' % (r,))
                    #Don't use enumerate() because not all Python versions have it.
                    i = 0
                    for x in r:
                        if x is searched_obj:
                            found_as = '%s[%s]' % (r_type.__name__, i)
                            if DEBUG:
                                sys.stderr.write(' Found as %s in tuple: \n' % (found_as,))
                            break
                        i += 1
                if found_as:
                    if not isinstance(found_as, str):
                        found_as = str(found_as)
                    found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)
                ret.append(pydevd_xml.var_to_xml(
                    r,
                    representation,
                    additionalInXml=' id="%s"%s' % (r_id, found_as)))
        finally:
            if DEBUG:
                sys.stderr.write('Done searching for references.\n')
            #If we have any exceptions, don't keep dangling references from this frame to any of our objects.
            # NOTE(review): this also sets searched_obj to None, so the
            # except handler below reports on None rather than the original
            # object — existing behavior, kept as-is.
            all_objects = None
            referrers = None
            searched_obj = None
            r = None
            x = None
            key = None
            val = None
            curr_frame = None
            ignore_frames = None
    except:
        traceback.print_exc()
        ret = ['<xml>\n']
        ret.append('<for>\n')
        ret.append(pydevd_xml.var_to_xml(
            searched_obj,
            'Error getting referrers for:',
            additionalInXml=' id="%s"' % (id(searched_obj),)))
        ret.append('</for>\n')
        ret.append('</xml>')
        ret = ''.join(ret)
        return ret
    ret.append('</xml>')
    ret = ''.join(ret)
    return ret
|
|
import os
import json
import enum
from Tests.scripts.utils.content_packs_util import IGNORED_FILES
CONTENT_ROOT_PATH = os.path.abspath(os.path.join(__file__, '../../..'))  # full path to content root repo
PACKS_FOLDER = "Packs"  # name of base packs folder inside content repo
PACKS_FULL_PATH = os.path.join(CONTENT_ROOT_PATH, PACKS_FOLDER)  # full path to Packs folder in content repo
# Relative 'Packs/<name>' paths for packs that are excluded from processing.
IGNORED_PATHS = [os.path.join(PACKS_FOLDER, p) for p in IGNORED_FILES]
# JSON file describing the marketplace landing-page sections, next to this module.
LANDING_PAGE_SECTIONS_PATH = os.path.abspath(os.path.join(__file__, '../landingPage_sections.json'))
class BucketUploadFlow(object):
    """ Bucket Upload Flow constants
    """
    # File / step names used by the upload pipeline.
    PACKS_RESULTS_FILE = "packs_results.json"
    PREPARE_CONTENT_FOR_TESTING = "prepare_content_for_testing"
    UPLOAD_PACKS_TO_MARKETPLACE_STORAGE = "upload_packs_to_marketplace_storage"
    # Keys used inside the packs results file.
    SUCCESSFUL_PACKS = "successful_packs"
    SUCCESSFUL_PRIVATE_PACKS = "successful_private_packs"
    FAILED_PACKS = "failed_packs"
    STATUS = "status"
    AGGREGATED = "aggregated"
    IMAGES = 'images'
    AUTHOR = 'author'
    INTEGRATIONS = 'integrations'
    BUCKET_UPLOAD_BUILD_TITLE = "Upload Packs To Marketplace Storage"
    BUCKET_UPLOAD_TYPE = "bucket_upload_flow"
    # Different upload job names relate to different CI platforms:
    # "Upload Packs To Marketplace" - CircleCI
    # "upload-packs-to-marketplace" - Gitlab
    UPLOAD_JOB_NAMES = ["Upload Packs To Marketplace", "upload-packs-to-marketplace"]
    LATEST_VERSION = 'latest_version'
    # Matches integration YAML artifacts, e.g. 'integration-Foo.yml'.
    INTEGRATION_DIR_REGEX = r"^integration-(.+).yml$"
class GCPConfig(object):
    """ Google cloud storage basic configurations
    """
    # GCS path prefixes for packs content.
    CONTENT_PACKS_PATH = "content/packs"
    PRODUCTION_STORAGE_BASE_PATH = "content/packs"
    IMAGES_BASE_PATH = "content/packs"  # images packs prefix stored in metadata
    BUILD_PATH_PREFIX = "content/builds"
    BUILD_BASE_PATH = ""
    PRIVATE_BASE_PATH = "content/packs"
    STORAGE_CONTENT_PATH = "content"  # base path for content in gcs
    USE_GCS_RELATIVE_PATH = True  # whether to use relative path in uploaded to gcs images
    GCS_PUBLIC_URL = "https://storage.googleapis.com"  # disable-secrets-detection
    # Bucket names for the production / CI / private marketplaces.
    PRODUCTION_BUCKET = "marketplace-dist"
    CI_BUILD_BUCKET = "marketplace-ci-build"
    PRODUCTION_PRIVATE_BUCKET = "marketplace-dist-private"
    CI_PRIVATE_BUCKET = "marketplace-ci-build-private"
    BASE_PACK = "Base"  # base pack name
    INDEX_NAME = "index"  # main index folder name
    CORE_PACK_FILE_NAME = "corepacks.json"  # core packs file name
    BUILD_BUCKET_PACKS_ROOT_PATH = 'content/builds/{branch}/{build}/content/packs'
    # NOTE: these JSON files are read at import time; a missing file next to
    # this module will fail the import.
    with open(os.path.join(os.path.dirname(__file__), 'core_packs_list.json'), 'r') as core_packs_list_file:
        CORE_PACKS_LIST = json.load(core_packs_list_file)
    with open(os.path.join(os.path.dirname(__file__), 'upgrade_core_packs_list.json'), 'r') as upgrade_core_packs_list:
        packs_list = json.load(upgrade_core_packs_list)
        CORE_PACKS_LIST_TO_UPDATE = packs_list.get("update_core_packs_list")
class PackTags(object):
    """ Pack tag constants """
    TRENDING = "Trending"
    NEW = "New"
    TIM = "TIM"
    USE_CASE = "Use Case"
    TRANSFORMER = "Transformer"
    FILTER = "Filter"
class Metadata(object):
    """ Metadata constants and default values that are used in metadata parsing.
    """
    DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
    # Support levels and their defaults.
    XSOAR_SUPPORT = "xsoar"
    PARTNER_SUPPORT = "partner"
    XSOAR_SUPPORT_URL = "https://www.paloaltonetworks.com/cortex"  # disable-secrets-detection
    XSOAR_AUTHOR = "Cortex XSOAR"
    SERVER_DEFAULT_MIN_VERSION = "6.0.0"
    CERTIFIED = "certified"
    EULA_URL = "https://github.com/demisto/content/blob/master/LICENSE"  # disable-secrets-detection
    # Keys of the generated metadata.json document.
    CURRENT_VERSION = 'currentVersion'
    SERVER_MIN_VERSION = 'serverMinVersion'
    HIDDEN = 'hidden'
    NAME = 'name'
    ID = 'id'
    DESCRIPTION = 'description'
    CREATED = 'created'
    UPDATED = 'updated'
    LEGACY = 'legacy'
    SUPPORT = 'support'
    SUPPORT_DETAILS = 'supportDetails'
    EULA_LINK = 'eulaLink'
    AUTHOR = 'author'
    AUTHOR_IMAGE = 'authorImage'
    CERTIFICATION = 'certification'
    PRICE = 'price'
    VERSION_INFO = 'versionInfo'
    COMMIT = 'commit'
    DOWNLOADS = 'downloads'
    TAGS = 'tags'
    CATEGORIES = 'categories'
    CONTENT_ITEMS = 'contentItems'
    SEARCH_RANK = 'searchRank'
    INTEGRATIONS = 'integrations'
    USE_CASES = 'useCases'
    KEY_WORDS = 'keywords'
    DEPENDENCIES = 'dependencies'
    # Keys specific to premium / partner packs.
    PREMIUM = 'premium'
    VENDOR_ID = 'vendorId'
    PARTNER_ID = 'partnerId'
    PARTNER_NAME = 'partnerName'
    CONTENT_COMMIT_HASH = 'contentCommitHash'
    PREVIEW_ONLY = 'previewOnly'
    MANDATORY = 'mandatory'
    DISPLAYED_IMAGES = 'displayedImages'
    EMAIL = 'email'
    URL = 'url'
class PackFolders(enum.Enum):
    """ Pack known folders. Should be replaced by constants from demisto-sdk in later step.
    """
    SCRIPTS = "Scripts"
    PLAYBOOKS = "Playbooks"
    INTEGRATIONS = "Integrations"
    TEST_PLAYBOOKS = 'TestPlaybooks'
    REPORTS = "Reports"
    DASHBOARDS = 'Dashboards'
    WIDGETS = 'Widgets'
    INCIDENT_FIELDS = 'IncidentFields'
    INCIDENT_TYPES = 'IncidentTypes'
    INDICATOR_FIELDS = 'IndicatorFields'
    LAYOUTS = 'Layouts'
    CLASSIFIERS = 'Classifiers'
    INDICATOR_TYPES = 'IndicatorTypes'
    CONNECTIONS = "Connections"
    GENERIC_DEFINITIONS = "GenericDefinitions"
    GENERIC_FIELDS = "GenericFields"
    GENERIC_MODULES = "GenericModules"
    GENERIC_TYPES = "GenericTypes"
    LISTS = 'Lists'
    PREPROCESS_RULES = "PreProcessRules"
    JOBS = 'Jobs'
    @classmethod
    def pack_displayed_items(cls):
        """Folders whose content items are displayed in the marketplace."""
        return {
            PackFolders.SCRIPTS.value, PackFolders.DASHBOARDS.value, PackFolders.INCIDENT_FIELDS.value,
            PackFolders.INCIDENT_TYPES.value, PackFolders.INTEGRATIONS.value, PackFolders.PLAYBOOKS.value,
            PackFolders.INDICATOR_FIELDS.value, PackFolders.REPORTS.value, PackFolders.INDICATOR_TYPES.value,
            PackFolders.LAYOUTS.value, PackFolders.CLASSIFIERS.value, PackFolders.WIDGETS.value,
            PackFolders.GENERIC_DEFINITIONS.value, PackFolders.GENERIC_FIELDS.value, PackFolders.GENERIC_MODULES.value,
            PackFolders.GENERIC_TYPES.value, PackFolders.LISTS.value, PackFolders.JOBS.value
        }
    @classmethod
    def yml_supported_folders(cls):
        """Folders whose entities are stored as YAML files."""
        return {PackFolders.INTEGRATIONS.value, PackFolders.SCRIPTS.value, PackFolders.PLAYBOOKS.value,
                PackFolders.TEST_PLAYBOOKS.value}
    @classmethod
    def json_supported_folders(cls):
        """Folders whose entities are stored as JSON files."""
        return {
            PackFolders.CLASSIFIERS.value, PackFolders.CONNECTIONS.value, PackFolders.DASHBOARDS.value,
            PackFolders.INCIDENT_FIELDS.value, PackFolders.INCIDENT_TYPES.value, PackFolders.INDICATOR_FIELDS.value,
            PackFolders.LAYOUTS.value, PackFolders.INDICATOR_TYPES.value, PackFolders.REPORTS.value,
            PackFolders.WIDGETS.value, PackFolders.GENERIC_DEFINITIONS.value, PackFolders.GENERIC_FIELDS.value,
            PackFolders.GENERIC_MODULES.value, PackFolders.GENERIC_TYPES.value, PackFolders.LISTS.value,
            PackFolders.PREPROCESS_RULES.value, PackFolders.JOBS.value
        }
class PackIgnored(object):
    """ A class that represents all pack files/directories to be ignored if a change is detected in any of them
    ROOT_FILES: The files in the pack root directory
    NESTED_FILES: The files to be ignored inside the pack entities directories. Empty list = all files.
    NESTED_DIRS: The 2nd level directories under the pack entities directories to ignore all of their files.
    """
    PACK_IGNORE = ".pack-ignore"
    SECRETS_IGNORE = ".secrets-ignore"
    ROOT_FILES = [SECRETS_IGNORE, PACK_IGNORE]
    # Per-folder filename suffixes/names to ignore; [] means ignore everything.
    NESTED_FILES = {
        PackFolders.INTEGRATIONS.value: ["README.md", "Pipfile", "Pipfile.lock", "_test.py", "commands.txt"],
        PackFolders.SCRIPTS.value: ["README.md", "Pipfile", "Pipfile.lock", "_test.py"],
        PackFolders.TEST_PLAYBOOKS.value: [],
        PackFolders.PLAYBOOKS.value: ["_README.md"],
    }
    NESTED_DIRS = [PackFolders.INTEGRATIONS.value, PackFolders.SCRIPTS.value]
class PackStatus(enum.Enum):
    """ Enum of pack upload status, is used in printing upload summary.
    """
    # Each member's value is the human-readable message printed in the
    # upload summary for that outcome.
    SUCCESS = "Successfully uploaded pack data to gcs"
    FAILED_LOADING_USER_METADATA = "Failed in loading user defined metadata"
    FAILED_IMAGES_UPLOAD = "Failed to upload pack integration images to gcs"
    FAILED_AUTHOR_IMAGE_UPLOAD = "Failed to upload pack author image to gcs"
    FAILED_METADATA_PARSING = "Failed to parse and create metadata.json"
    FAILED_COLLECT_ITEMS = "Failed to collect pack content items data"
    FAILED_ZIPPING_PACK_ARTIFACTS = "Failed zipping pack artifacts"
    FAILED_SIGNING_PACKS = "Failed to sign the packs"
    FAILED_PREPARING_INDEX_FOLDER = "Failed in preparing and cleaning necessary index files"
    FAILED_UPDATING_INDEX_FOLDER = "Failed updating index folder"
    FAILED_UPLOADING_PACK = "Failed in uploading pack zip to gcs"
    PACK_ALREADY_EXISTS = "Specified pack already exists in gcs under latest version"
    PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD = "Specific pack is not updated in current build"
    FAILED_REMOVING_PACK_SKIPPED_FOLDERS = "Failed to remove pack hidden and skipped folders"
    FAILED_RELEASE_NOTES = "Failed to generate changelog.json"
    FAILED_DETECTING_MODIFIED_FILES = "Failed in detecting modified files of the pack"
    FAILED_SEARCHING_PACK_IN_INDEX = "Failed in searching pack folder in index"
    FAILED_DECRYPT_PACK = "Failed to decrypt pack: a premium pack," \
                          " which should be encrypted, seems not to be encrypted."
    FAILED_METADATA_REFORMATING = "Failed to reparse and create metadata.json when missing dependencies"
class Changelog(object):
    """
    A class that represents all the keys that are present in a Changelog entry.
    """
    # Key of the release-notes text in a changelog entry.
    RELEASE_NOTES = 'releaseNotes'
    # Key of the entry's display name.
    DISPLAY_NAME = 'displayName'
    # Key of the entry's release timestamp.
    RELEASED = 'released'
|
|
"Implements KeyDict and KeySet classes"
from collections import defaultdict
import numpy as np
from .small_classes import Numbers, Quantity
from .small_scripts import is_sweepvar, isnan, SweepValue
DIMLESS_QUANTITY = Quantity(1, "dimensionless")
INT_DTYPE = np.dtype(int)
def clean_value(key, value):
    """Gets the value of variable-less monomials, so that
    `x.sub({x: gpkit.units.m})` and `x.sub({x: gpkit.ureg.m})` are equivalent.
    Also converts any quantities to the key's units, because quantities
    can't/shouldn't be stored as elements of numpy arrays.
    """
    # Sized containers are cleaned elementwise into a plain list.
    if hasattr(value, "__len__"):
        return [clean_value(key, element) for element in value]
    # A variable-less monomial (empty .exp) stands in for its constant value.
    if hasattr(value, "exp") and not value.exp:
        value = value.value
    # Quantities (have .units, aren't hmap-backed) are converted to the
    # key's units and reduced to their bare magnitude.
    if hasattr(value, "units") and not hasattr(value, "hmap"):
        return value.to(key.units or "dimensionless").magnitude
    return value
class KeyDict(dict):
    """KeyDicts do two things over a dict: map keys and collapse arrays.
    >>>> kd = gpkit.keydict.KeyDict()
    Mapping keys
    ------------
    If ``.keymapping`` is True, a KeyDict keeps an internal list of VarKeys as
    canonical keys, and their values can be accessed with any object whose
    `key` attribute matches one of those VarKeys, or with strings matching
    any of the multiple possible string interpretations of each key:
    For example, after creating the KeyDict kd and setting kd[x] = v (where x
    is a Variable or VarKey), v can be accessed with by the following keys:
    - x
    - x.key
    - x.name (a string)
    - "x_modelname" (x's name including modelname)
    Note that if a item is set using a key that does not have a `.key`
    attribute, that key can be set and accessed normally.
    Collapsing arrays
    -----------------
    If ``.collapse_arrays`` is True then VarKeys which have a `shape`
    parameter (indicating they are part of an array) are stored as numpy
    arrays, and automatically de-indexed when a matching VarKey with a
    particular `idx` parameter is used as a key.
    See also: gpkit/tests/t_keydict.py.
    """
    collapse_arrays = True  # store array-element keys in one numpy array
    keymapping = True  # propagate each key's aliases into self.keymap
    def __init__(self, *args, **kwargs):
        "Passes through to dict.__init__ via the `update()` method"
        # pylint: disable=super-init-not-called
        # Optional external registry of keys-by-name; consulted by
        # parse_and_index when a non-VarKey (e.g. string) key is used.
        self.varkeys = None
        # Maps any alias of a key to the set of canonical keys it could
        # refer to.  defaultdict(set), so a lookup silently creates an
        # empty entry (see the cleanup in __getitem__).
        self.keymap = defaultdict(set)
        # Keys stored directly but whose aliases haven't been folded into
        # keymap yet; processed lazily by update_keymap().
        self._unmapped_keys = set()
        self.log_gets = False
        self.logged_gets = set()
        self.update(*args, **kwargs)
    def get(self, key, alternative=KeyError):
        # NOTE: unlike dict.get, a missing key raises KeyError by default;
        # pass any other `alternative` to have it returned instead.
        if key not in self:
            if alternative is KeyError:
                raise alternative(key)
            return alternative
        return self[key]
    def update(self, *args, **kwargs):
        "Iterates through the dictionary created by args and kwargs"
        for k, v in dict(*args, **kwargs).items():
            if hasattr(v, "copy"):
                # We don't want just a reference (for e.g. numpy arrays)
                # KeyDict values are expected to be immutable (Numbers)
                # or to have a copy attribute.
                v = v.copy()
            self[k] = v
    def parse_and_index(self, key):
        "Returns key if key had one, and veckey/idx for indexed veckeys."
        idx = None
        try:
            # Objects carrying a .key (Variables, VarKeys) are canonicalized;
            # an element of a vector collapses to (veckey, idx).
            key = key.key
            if self.collapse_arrays and key.idx:
                key, idx = key.veckey, key.idx
        except AttributeError:
            # No .key attribute: try to resolve via the varkeys registry.
            if not self.varkeys:
                self.update_keymap()
            elif key in self.varkeys:
                keys = self.varkeys[key]
                origkey, key = key, next(iter(keys))
                if len(keys) > 1:
                    # Ambiguous name: acceptable only if all matches are
                    # elements of the same vector key.
                    if (key.veckey
                            and all(k.veckey == key.veckey for k in keys)):
                        key = key.veckey
                    else:
                        raise ValueError("%s could refer to multiple keys in"
                                         " this substitutions KeyDict. Use"
                                         " `.variables_byname(%s)` to see all"
                                         " of them." % (origkey, origkey))
            else:
                raise KeyError(key)
            if self.collapse_arrays:
                idx = getattr(key, "idx", None)
                if idx:
                    key = key.veckey
        return key, idx
    def __contains__(self, key):
        "In a winding way, figures out if a key is in the KeyDict"
        try:
            key, idx = self.parse_and_index(key)
        except KeyError:
            return False
        except ValueError:  # multiple keys correspond
            return True
        if dict.__contains__(self, key):
            if idx:
                try:
                    # An array element counts as present only if it is a
                    # sweep or a non-NaN value (NaN marks "unset").
                    value = dict.__getitem__(self, key)[idx]
                    return True if is_sweepvar(value) else not isnan(value)
                except TypeError:
                    raise TypeError("%s has an idx, but its value in this"
                                    " KeyDict is the scalar %s."
                                    % (key, dict.__getitem__(self, key)))
                except IndexError:
                    raise IndexError("key %s with idx %s is out of bounds"
                                     " for value %s" %
                                     (key, idx,
                                      dict.__getitem__(self, key)))
            return True
        elif key in self.keymap:
            return True
        else:
            return False
    def __call__(self, key):
        # Like __getitem__, but attaches units to the returned value(s).
        got = self[key]
        # if uniting ever becomes a speed hit, cache the results
        if isinstance(got, dict):
            for k, v in got.items():
                got[k] = v*(k.units or DIMLESS_QUANTITY)
        else:
            if not hasattr(key, "units"):
                # Resolve the lookup key to its single canonical VarKey so
                # its units can be read.
                parsedkey, _ = self.parse_and_index(key)
                keys = self.keymap[parsedkey]
                key, = keys
            got = Quantity(got, key.units or DIMLESS_QUANTITY)
        return got
    def __getitem__(self, key):
        "Overloads __getitem__ and [] access to work with all keys"
        key, idx = self.parse_and_index(key)
        keys = self.keymap[key]
        if not keys:
            del self.keymap[key]  # remove blank entry added due to defaultdict
            raise KeyError(key)
        values = []
        for k in keys:
            got = dict.__getitem__(self, k)
            if idx:
                got = got[idx]
            values.append(got)
            if self.log_gets:
                self.logged_gets.add(k)
        # A unique match returns the bare value; several matches return a
        # {key: value} dict.
        if len(values) == 1:
            return values[0]
        return dict(zip(keys, values))
    def __setitem__(self, key, value):
        "Overloads __setitem__ and []= to work with all keys"
        # pylint: disable=too-many-boolean-expressions
        key, idx = self.parse_and_index(key)
        if key not in self.keymap:
            self.keymap[key].add(key)
            self._unmapped_keys.add(key)
            if idx:
                # First write to an element of a new vector key: allocate a
                # NaN-filled array for the whole vector to write into.
                number_array = isinstance(value, Numbers)
                kwargs = {} if number_array else {"dtype": "object"}
                emptyvec = np.full(key.shape, np.nan, **kwargs)
                dict.__setitem__(self, key, emptyvec)
        if hasattr(value, "exp") and not value.exp:
            value = value.value  # substitute constant monomials
        if isinstance(value, Quantity):
            value = value.to(key.units or "dimensionless").magnitude
        if idx:
            if is_sweepvar(value):
                # Sweeps can't live in a float array; recast as object array.
                dict.__setitem__(self, key,
                                 np.array(dict.__getitem__(self, key), object))
                value = SweepValue(value[1])
            dict.__getitem__(self, key)[idx] = value
        else:
            if (self.collapse_arrays and hasattr(key, "descr")
                    and "shape" in key.descr  # if veckey, and
                    and not isinstance(value, np.ndarray)  # not an array, and
                    and not is_sweepvar(value)):  # not a sweep, then
                if not hasattr(value, "__len__"):  # fill an array with it, or
                    value = np.full(key.shape, value, "f")
                # if it's not a list of arrays (as in a sol), clean it up!
                elif not isinstance(value[0], np.ndarray):
                    value = np.array([clean_value(key, v) for v in value])
            if getattr(value, "shape", False) and dict.__contains__(self, key):
                # Partial overwrite: copy only the non-NaN entries of the
                # incoming array over the stored one.
                goodvals = ~isnan(value)
                if self[key].dtype != value.dtype:
                    # e.g., we're replacing a number with a linked function
                    dict.__setitem__(self, key, np.array(self[key],
                                                         dtype=value.dtype))
                self[key][goodvals] = value[goodvals]
            else:
                if hasattr(value, "dtype") and value.dtype == INT_DTYPE:
                    # store ints as floats so NaN masking stays possible
                    value = np.array(value, "f")
                dict.__setitem__(self, key, value)
    def update_keymap(self):
        "Updates the keymap with the keys in _unmapped_keys"
        while self.keymapping and self._unmapped_keys:
            key = self._unmapped_keys.pop()
            if hasattr(key, "keys"):
                # register every alias the key knows about
                for mapkey in key.keys:
                    self.keymap[mapkey].add(key)
    def __delitem__(self, key):
        "Overloads del [] to work with all keys"
        key, idx = self.parse_and_index(key)
        keys = self.keymap[key]
        if not keys:
            raise KeyError("key %s not found." % key)
        for k in list(keys):
            delete = True
            if idx:
                # Deleting one element: NaN it out; drop the whole vector
                # only once every element has been NaN'd.
                dict.__getitem__(self, k)[idx] = np.nan
                if not isnan(dict.__getitem__(self, k)).all():
                    delete = False
            if delete:
                dict.__delitem__(self, k)
                # Also scrub k from every alias entry in the keymap.
                mapkeys = set([k])
                if self.keymapping and hasattr(k, "keys"):
                    mapkeys.update(k.keys)
                for mappedkey in mapkeys:
                    if mappedkey in self.keymap:
                        self.keymap[mappedkey].remove(k)
                        if not self.keymap[mappedkey]:
                            del self.keymap[mappedkey]
class KeySet(KeyDict):
    "A KeyDict used as a set: keys only, no values, no array collapsing."
    collapse_arrays = False
    def add(self, item):
        "Registers an item's canonical key in the set."
        key = self.parse_and_index(item)[0]
        if key not in self.keymap:
            self.keymap[key].add(key)
            self._unmapped_keys.add(key)
        dict.__setitem__(self, key, None)
    def update(self, *args, **kwargs):
        "Adds every key found in the given set-like or dict-like arguments."
        if len(args) == 1:
            # set-like interface: iterate the single iterable argument
            source = args[0]
        else:
            # dict-like interface: iterate the keys of the built dict
            source = dict(*args, **kwargs)
        for item in source:
            self.add(item)
    def __getitem__(self, key):
        "Returns the set of canonical keys a particular key maps to."
        mapped, _ = self.parse_and_index(key)
        return self.keymap[mapped]
|
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import xml.sax
import time
import uuid
import urllib
import boto
from boto.connection import AWSAuthConnection
from boto import handler
from boto.resultset import ResultSet
import boto.jsonresponse
import exception
import hostedzone
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
#boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
    DefaultHost = 'route53.amazonaws.com'
    """The default Route53 API endpoint to connect to."""
    Version = '2012-02-29'
    """Route53 API version."""
    XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/'
    """XML schema for this Route53 API version."""
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 port=None, proxy=None, proxy_port=None,
                 host=DefaultHost, debug=0):
        """Creates an authenticated HTTPS connection to the Route53 endpoint
        at |host|."""
        AWSAuthConnection.__init__(self, host,
                aws_access_key_id, aws_secret_access_key,
                True, port, proxy, proxy_port, debug=debug)
    def _required_auth_capability(self):
        return ['route53']
    def make_request(self, action, path, headers=None, data='', params=None):
        """Makes an HTTP request to the Route53 REST API.
        :param action: the HTTP method (e.g. 'GET', 'POST', 'DELETE')
        :param path: the resource path; |params| (None values skipped) are
            appended to it as a URL-encoded query string.
        """
        if params:
            pairs = []
            # NOTE: dict.iteritems() is Python-2-only; items() works in both.
            for key, val in params.items():
                if val is None:
                    continue
                pairs.append(key + '=' + urllib.quote(str(val)))
            path += '?' + '&'.join(pairs)
        return AWSAuthConnection.make_request(self, action, path, headers, data)
    # Hosted Zones
    def get_all_hosted_zones(self, start_marker=None, zone_list=None):
        """
        Returns a Python data structure with information about all
        Hosted Zones defined for the AWS account.
        :param int start_marker: start marker to pass when fetching additional
            results after a truncated list
        :param list zone_list: a HostedZones list to prepend to results
        """
        params = {}
        if start_marker:
            params = {'marker': start_marker}
        response = self.make_request('GET', '/%s/hostedzone' % self.Version,
                                     params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='HostedZones',
                                      item_marker=('HostedZone',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        if zone_list:
            e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
        # Follow NextMarker pagination recursively until all zones are
        # accumulated.  (`in` replaces the deprecated dict.has_key.)
        while 'NextMarker' in e['ListHostedZonesResponse']:
            next_marker = e['ListHostedZonesResponse']['NextMarker']
            zone_list = e['ListHostedZonesResponse']['HostedZones']
            e = self.get_all_hosted_zones(next_marker, zone_list)
        return e
    def get_hosted_zone(self, hosted_zone_id):
        """
        Get detailed information about a particular Hosted Zone.
        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone
        """
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='NameServers',
                                      item_marker=('NameServer',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    def get_hosted_zone_by_name(self, hosted_zone_name):
        """
        Get detailed information about a particular Hosted Zone.
        Returns None if no zone matches.
        :type hosted_zone_name: str
        :param hosted_zone_name: The fully qualified domain name for the Hosted
            Zone
        """
        if hosted_zone_name[-1] != '.':
            hosted_zone_name += '.'
        all_hosted_zones = self.get_all_hosted_zones()
        for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
            #check that they gave us the FQDN for their zone
            if zone['Name'] == hosted_zone_name:
                return self.get_hosted_zone(zone['Id'].split('/')[-1])
    def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
        """
        Create a new Hosted Zone. Returns a Python data structure with
        information about the newly created Hosted Zone.
        :type domain_name: str
        :param domain_name: The name of the domain. This should be a
            fully-specified domain, and should end with a final period
            as the last label indication. If you omit the final period,
            Amazon Route 53 assumes the domain is relative to the root.
            This is the name you have registered with your DNS registrar.
            It is also the name you will delegate from your registrar to
            the Amazon Route 53 delegation servers returned in
            response to this request.
        :type caller_ref: str
        :param caller_ref: A unique string that identifies the request
            and that allows failed CreateHostedZone requests to be retried
            without the risk of executing the operation twice. If you don't
            provide a value for this, boto will generate a Type 4 UUID and
            use that.
        :type comment: str
        :param comment: Any comments you want to include about the hosted
            zone.
        """
        if caller_ref is None:
            caller_ref = str(uuid.uuid4())
        params = {'name' : domain_name,
                  'caller_ref' : caller_ref,
                  'comment' : comment,
                  'xmlns' : self.XMLNameSpace}
        xml = HZXML % params
        uri = '/%s/hostedzone' % self.Version
        response = self.make_request('POST', uri,
                                     {'Content-Type' : 'text/xml'}, xml)
        body = response.read()
        boto.log.debug(body)
        # Route53 answers CreateHostedZone with 201 Created on success.
        if response.status == 201:
            e = boto.jsonresponse.Element(list_marker='NameServers',
                                          item_marker=('NameServer',))
            h = boto.jsonresponse.XmlHandler(e, None)
            h.parse(body)
            return e
        else:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
    def delete_hosted_zone(self, hosted_zone_id):
        """Delete the Hosted Zone identified by |hosted_zone_id|.
        Returns the parsed response body; raises DNSServerError on failure."""
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('DELETE', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status not in (200, 204):
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    # Resource Record Sets
    def get_all_rrsets(self, hosted_zone_id, type=None,
                       name=None, identifier=None, maxitems=None):
        """
        Retrieve the Resource Record Sets defined for this Hosted Zone.
        Returns the raw XML data returned by the Route53 call.
        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone
        :type type: str
        :param type: The type of resource record set to begin the record
            listing from. Valid choices are:
            * A
            * AAAA
            * CNAME
            * MX
            * NS
            * PTR
            * SOA
            * SPF
            * SRV
            * TXT
            Valid values for weighted resource record sets:
            * A
            * AAAA
            * CNAME
            * TXT
            Valid values for Zone Apex Aliases:
            * A
            * AAAA
        :type name: str
        :param name: The first name in the lexicographic ordering of domain
            names to be retrieved
        :type identifier: str
        :param identifier: In a hosted zone that includes weighted resource
            record sets (multiple resource record sets with the same DNS
            name and type that are differentiated only by SetIdentifier),
            if results were truncated for a given DNS name and type,
            the value of SetIdentifier for the next resource record
            set that has the current DNS name and type
        :type maxitems: int
        :param maxitems: The maximum number of records
        """
        from boto.route53.record import ResourceRecordSets
        params = {'type': type, 'name': name,
                  'Identifier': identifier, 'maxitems': maxitems}
        uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri, params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs
    def change_rrsets(self, hosted_zone_id, xml_body):
        """
        Create or change the authoritative DNS information for this
        Hosted Zone.
        Returns a Python data structure with information about the set of
        changes, including the Change ID.
        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone
        :type xml_body: str
        :param xml_body: The list of changes to be made, defined in the
            XML schema defined by the Route53 service.
        """
        uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
        response = self.make_request('POST', uri,
                                     {'Content-Type' : 'text/xml'},
                                     xml_body)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    def get_change(self, change_id):
        """
        Get information about a proposed set of changes, as submitted
        by the change_rrsets method.
        Returns a Python data structure with status information about the
        changes.
        :type change_id: str
        :param change_id: The unique identifier for the set of changes.
            This ID is returned in the response to the change_rrsets method.
        """
        uri = '/%s/change/%s' % (self.Version, change_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
|
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: change_mach_o_flags.py [--executable-heap] [--no-pie] <executablepath>
Arranges for the executable at |executable_path| to have its data (heap)
pages protected to prevent execution on Mac OS X 10.7 ("Lion"), and to have
the PIE (position independent executable) bit set to enable ASLR (address
space layout randomization). With --executable-heap or --no-pie, the
respective bits are cleared instead of set, making the heap executable or
disabling PIE/ASLR.
This script is able to operate on thin (single-architecture) Mach-O files
and fat (universal, multi-architecture) files. When operating on fat files,
it will set or clear the bits for each architecture contained therein.
NON-EXECUTABLE HEAP
Traditionally in Mac OS X, 32-bit processes did not have data pages set to
prohibit execution. Although user programs could call mprotect and
mach_vm_protect to deny execution of code in data pages, the kernel would
silently ignore such requests without updating the page tables, and the
hardware would happily execute code on such pages. 64-bit processes were
always given proper hardware protection of data pages. This behavior was
controllable on a system-wide level via the vm.allow_data_exec sysctl, which
is set by default to 1. The bit with value 1 (set by default) allows code
execution on data pages for 32-bit processes, and the bit with value 2
(clear by default) does the same for 64-bit processes.
In Mac OS X 10.7, executables can "opt in" to having hardware protection
against code execution on data pages applied. This is done by setting a new
bit in the |flags| field of an executable's |mach_header|. When
MH_NO_HEAP_EXECUTION is set, proper protections will be applied, regardless
of the setting of vm.allow_data_exec. See xnu-1699.22.73/osfmk/vm/vm_map.c
override_nx and xnu-1699.22.73/bsd/kern/mach_loader.c load_machfile.
The Apple toolchain has been revised to set the MH_NO_HEAP_EXECUTION when
producing executables, provided that -allow_heap_execute is not specified
at link time. Only linkers shipping with Xcode 4.0 and later (ld64-123.2 and
later) have this ability. See ld64-123.2.1/src/ld/Options.cpp
Options::reconfigureDefaults() and
ld64-123.2.1/src/ld/HeaderAndLoadCommands.hpp
HeaderAndLoadCommandsAtom<A>::flags().
This script sets the MH_NO_HEAP_EXECUTION bit on Mach-O executables. It is
intended for use with executables produced by a linker that predates Apple's
modifications to set this bit itself. It is also useful for setting this bit
for non-i386 executables, including x86_64 executables. Apple's linker only
sets it for 32-bit i386 executables, presumably under the assumption that
the value of vm.allow_data_exec is set in stone. However, if someone were to
change vm.allow_data_exec to 2 or 3, 64-bit x86_64 executables would run
without hardware protection against code execution on data pages. This
script can set the bit for x86_64 executables, guaranteeing that they run
with appropriate protection even when vm.allow_data_exec has been tampered
with.
POSITION-INDEPENDENT EXECUTABLES/ADDRESS SPACE LAYOUT RANDOMIZATION
This script sets or clears the MH_PIE bit in an executable's Mach-O header,
enabling or disabling position independence on Mac OS X 10.5 and later.
Processes running position-independent executables have varying levels of
ASLR protection depending on the OS release. The main executable's load
address, shared library load addresess, and the heap and stack base
addresses may be randomized. Position-independent executables are produced
by supplying the -pie flag to the linker (or defeated by supplying -no_pie).
Executables linked with a deployment target of 10.7 or higher have PIE on
by default.
This script is never strictly needed during the build to enable PIE, as all
linkers used are recent enough to support -pie. However, it's used to
disable the PIE bit as needed on already-linked executables.
"""
import optparse
import os
import struct
import sys
# <mach-o/fat.h>
FAT_MAGIC = 0xcafebabe  # fat (universal) file magic number
FAT_CIGAM = 0xbebafeca  # FAT_MAGIC as seen through a byte-swapped read
# <mach-o/loader.h>
MH_MAGIC = 0xfeedface  # 32-bit mach_header magic number
MH_CIGAM = 0xcefaedfe  # MH_MAGIC byte-swapped
MH_MAGIC_64 = 0xfeedfacf  # 64-bit mach_header_64 magic number
MH_CIGAM_64 = 0xcffaedfe  # MH_MAGIC_64 byte-swapped
MH_EXECUTE = 0x2  # mach_header filetype value for executables
MH_PIE = 0x00200000  # flags bit: position-independent executable (ASLR)
MH_NO_HEAP_EXECUTION = 0x01000000  # flags bit: protect data (heap) pages from execution
class MachOError(Exception):
  """Raised by this module when a Mach-O or fat file is malformed or an I/O
  operation does not behave as expected."""
def CheckedSeek(file, offset):
  """Seeks the file-like object at |file| to offset |offset| and raises a
  MachOError if anything funny happens."""
  file.seek(offset, os.SEEK_SET)
  new_offset = file.tell()
  if new_offset != offset:
    # The parenthesized raise form is valid in both Python 2 and Python 3,
    # unlike the old "raise Class, arg" statement this replaced.
    raise MachOError(
        'seek: expected offset %d, observed %d' % (offset, new_offset))
def CheckedRead(file, count):
  """Reads |count| bytes from the file-like |file| object, raising a
  MachOError if any other number of bytes is read."""
  # Renamed the local from |bytes| to |data| to stop shadowing the builtin.
  data = file.read(count)
  if len(data) != count:
    # Python 2/3-portable raise form (was "raise MachOError, ...").
    raise MachOError(
        'read: expected length %d, observed %d' % (count, len(data)))
  return data
def ReadUInt32(file, endian):
  """Reads an unsigned 32-bit integer from the file-like |file| object,
  treating it as having endianness specified by |endian| (per the |struct|
  module), and returns it as a number. Raises a MachOError if the proper
  length of data can't be read from |file|."""
  # |data| replaces the builtin-shadowing name |bytes|.
  data = CheckedRead(file, 4)
  (uint32,) = struct.unpack(endian + 'I', data)
  return uint32
def ReadMachHeader(file, endian):
  """Reads an entire |mach_header| structure (<mach-o/loader.h>) from the
  file-like |file| object, treating it as having endianness specified by
  |endian| (per the |struct| module), and returns a 7-tuple of its members
  as numbers. Raises a MachOError if the proper length of data can't be read
  from |file|."""
  # |data| replaces the builtin-shadowing name |bytes|.
  data = CheckedRead(file, 28)
  magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
      struct.unpack(endian + '7I', data)
  return magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags
def ReadFatArch(file):
  """Reads an entire |fat_arch| structure (<mach-o/fat.h>) from the file-like
  |file| object and returns a 5-tuple of its members as numbers. fat_arch
  structures are always big-endian (the '>' format used below), so no
  |endian| parameter is needed (the old docstring wrongly mentioned one).
  Raises a MachOError if the proper length of data can't be read from
  |file|."""
  # |data| replaces the builtin-shadowing name |bytes|.
  data = CheckedRead(file, 20)
  cputype, cpusubtype, offset, size, align = struct.unpack('>5I', data)
  return cputype, cpusubtype, offset, size, align
def WriteUInt32(file, uint32, endian):
  """Writes |uint32| as an unsigned 32-bit integer to the file-like |file|
  object, treating it as having endianness specified by |endian| (per the
  |struct| module)."""
  # |data| replaces the builtin-shadowing name |bytes|.
  data = struct.pack(endian + 'I', uint32)
  assert len(data) == 4
  file.write(data)
def HandleMachOFile(file, options, offset=0):
  """Seeks the file-like |file| object to |offset|, reads its |mach_header|,
  and rewrites the header's |flags| field if appropriate. The header's
  endianness is detected. Both 32-bit and 64-bit Mach-O headers are supported
  (mach_header and mach_header_64). Raises MachOError if used on a header that
  does not have a known magic number or is not of type MH_EXECUTE. The
  MH_PIE and MH_NO_HEAP_EXECUTION bits are set or cleared in the |flags| field
  according to |options| and written to |file| if any changes need to be made.
  If already set or clear as specified by |options|, nothing is written."""
  CheckedSeek(file, offset)
  magic = ReadUInt32(file, '<')
  if magic == MH_MAGIC or magic == MH_MAGIC_64:
    endian = '<'
  elif magic == MH_CIGAM or magic == MH_CIGAM_64:
    endian = '>'
  else:
    # Python 2/3-portable raise form (was "raise MachOError, ...").
    raise MachOError(
        'Mach-O file at offset %d has illusion of magic' % offset)
  CheckedSeek(file, offset)
  magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
      ReadMachHeader(file, endian)
  assert magic == MH_MAGIC or magic == MH_MAGIC_64
  if filetype != MH_EXECUTE:
    raise MachOError(
        'Mach-O file at offset %d is type 0x%x, expected MH_EXECUTE' %
        (offset, filetype))
  original_flags = flags
  if options.no_heap_execution:
    flags |= MH_NO_HEAP_EXECUTION
  else:
    flags &= ~MH_NO_HEAP_EXECUTION
  if options.pie:
    flags |= MH_PIE
  else:
    flags &= ~MH_PIE
  # Only touch the file when a bit actually changed.  |flags| is the 7th
  # uint32 of the header, i.e. at byte offset 24.
  if flags != original_flags:
    CheckedSeek(file, offset + 24)
    WriteUInt32(file, flags, endian)
def HandleFatFile(file, options, fat_offset=0):
  """Seeks the file-like |file| object to |fat_offset| (the old docstring
  wrongly called it |offset|) and loops over its |fat_header| entries,
  calling HandleMachOFile for each contained architecture."""
  CheckedSeek(file, fat_offset)
  magic = ReadUInt32(file, '>')
  assert magic == FAT_MAGIC
  nfat_arch = ReadUInt32(file, '>')
  # range() replaces the Python-2-only xrange(); behavior is identical here.
  for index in range(0, nfat_arch):
    cputype, cpusubtype, offset, size, align = ReadFatArch(file)
    assert size >= 28
    # HandleMachOFile will seek around. Come back here after calling it, in
    # case it sought.
    fat_arch_offset = file.tell()
    HandleMachOFile(file, options, offset)
    CheckedSeek(file, fat_arch_offset)
def main(me, args):
  """Parses |args|, opens the named executable, and rewrites its Mach-O
  flags per the --executable-heap / --no-pie options. Returns 0 on success,
  1 on usage error."""
  parser = optparse.OptionParser('%prog [options] <executable_path>')
  parser.add_option('--executable-heap', action='store_false',
                    dest='no_heap_execution', default=True,
                    help='Clear the MH_NO_HEAP_EXECUTION bit')
  parser.add_option('--no-pie', action='store_false',
                    dest='pie', default=True,
                    help='Clear the MH_PIE bit')
  (options, loose_args) = parser.parse_args(args)
  if len(loose_args) != 1:
    parser.print_usage()
    return 1
  executable_path = loose_args[0]
  executable_file = open(executable_path, 'rb+')
  try:
    magic = ReadUInt32(executable_file, '<')
    if magic == FAT_CIGAM:
      # Check FAT_CIGAM and not FAT_MAGIC because the read was little-endian.
      HandleFatFile(executable_file, options)
    elif magic == MH_MAGIC or magic == MH_CIGAM or \
        magic == MH_MAGIC_64 or magic == MH_CIGAM_64:
      HandleMachOFile(executable_file, options)
    else:
      # Portable raise form; also report the path, not the file object's
      # repr (the old code interpolated |executable_file| by mistake).
      raise MachOError('%s is not a Mach-O or fat file' % executable_path)
  finally:
    # Close the file even when a MachOError propagates (it leaked before).
    executable_file.close()
  return 0
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from datetime import datetime
import os.path
import shutil
from StringIO import StringIO
import tempfile
import unittest
from trac.attachment import Attachment
from trac.core import *
from trac.test import EnvironmentStub
from trac.tests.resource import TestResourceChangeListener
from trac.util.datefmt import utc, to_utimestamp
from trac.wiki import WikiPage, IWikiChangeListener
class TestWikiChangeListener(Component):
    """Test helper: records every IWikiChangeListener callback it receives,
    so tests can assert which wiki notifications were fired and with what
    arguments."""
    implements(IWikiChangeListener)
    def __init__(self):
        # One accumulator list per callback type.
        self.added = []
        self.changed = []
        self.deleted = []
        self.deleted_version = []
        self.renamed = []
    def wiki_page_added(self, page):
        self.added.append(page)
    def wiki_page_changed(self, page, version, t, comment, author, ipnr):
        # Stored as a tuple so tests can compare all arguments at once.
        self.changed.append((page, version, t, comment, author, ipnr))
    def wiki_page_deleted(self, page):
        self.deleted.append(page)
    def wiki_page_version_deleted(self, page):
        self.deleted_version.append(page)
    def wiki_page_renamed(self, page, old_name):
        self.renamed.append((page, old_name))
class WikiPageTestCase(unittest.TestCase):
    """Exercises WikiPage CRUD, history, rename and listener notifications
    against a stub environment backed by a temporary directory."""
    def setUp(self):
        self.env = EnvironmentStub()
        # A real directory is required because the rename test stores an
        # attachment on disk.
        self.env.path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
        os.mkdir(self.env.path)
    def tearDown(self):
        shutil.rmtree(self.env.path)
        self.env.reset_db()
    def test_new_page(self):
        # A page constructed without a name is an empty, unsaved placeholder.
        page = WikiPage(self.env)
        self.assertEqual(False, page.exists)
        self.assertEqual(None, page.name)
        self.assertEqual(0, page.version)
        self.assertEqual('', page.text)
        self.assertEqual(0, page.readonly)
        self.assertEqual('', page.author)
        self.assertEqual('', page.comment)
        self.assertEqual(None, page.time)
    def test_existing_page(self):
        # Seed one revision directly in the database, then load it.
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        self.env.db_transaction(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
            ('TestPage', 1, to_utimestamp(t), 'joe', '::1', 'Bla bla',
             'Testing', 0))
        page = WikiPage(self.env, 'TestPage')
        self.assertEqual(True, page.exists)
        self.assertEqual('TestPage', page.name)
        self.assertEqual(1, page.version)
        self.assertEqual(None, page.resource.version)   # FIXME: Intentional?
        self.assertEqual('Bla bla', page.text)
        self.assertEqual(0, page.readonly)
        self.assertEqual('joe', page.author)
        self.assertEqual('Testing', page.comment)
        self.assertEqual(t, page.time)
        history = list(page.get_history())
        self.assertEqual(1, len(history))
        self.assertEqual((1, t, 'joe', 'Testing', '::1'), history[0])
        # Loading with an explicit version pins the resource version too.
        page = WikiPage(self.env, 'TestPage', 1)
        self.assertEqual(1, page.resource.version)
    def test_create_page(self):
        # Saving a new page writes revision 1 and notifies listeners.
        page = WikiPage(self.env)
        page.name = 'TestPage'
        page.text = 'Bla bla'
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        page.save('joe', 'Testing', '::1', t)
        self.assertEqual(True, page.exists)
        self.assertEqual(1, page.version)
        self.assertEqual(1, page.resource.version)
        self.assertEqual(0, page.readonly)
        self.assertEqual('joe', page.author)
        self.assertEqual('Testing', page.comment)
        self.assertEqual(t, page.time)
        self.assertEqual(
            [(1, to_utimestamp(t), 'joe', '::1', 'Bla bla', 'Testing', 0)],
            self.env.db_query("""
                SELECT version, time, author, ipnr, text, comment, readonly
                FROM wiki WHERE name=%s
                """, ('TestPage',)))
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.added[0])
    def test_update_page(self):
        # A second save appends revision 2 and keeps revision 1 intact.
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
        self.env.db_transaction(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
            ('TestPage', 1, to_utimestamp(t), 'joe', '::1', 'Bla bla',
             'Testing', 0))
        page = WikiPage(self.env, 'TestPage')
        page.text = 'Bla'
        page.save('kate', 'Changing', '192.168.0.101', t2)
        self.assertEqual(2, page.version)
        self.assertEqual(2, page.resource.version)
        self.assertEqual(0, page.readonly)
        self.assertEqual('kate', page.author)
        self.assertEqual('Changing', page.comment)
        self.assertEqual(t2, page.time)
        with self.env.db_query as db:
            rows = db("""
               SELECT version, time, author, ipnr, text, comment, readonly
               FROM wiki WHERE name=%s
               """, ('TestPage',))
            self.assertEqual(2, len(rows))
            self.assertEqual((1, to_utimestamp(t), 'joe', '::1', 'Bla bla',
                              'Testing', 0), rows[0])
            self.assertEqual((2, to_utimestamp(t2), 'kate', '192.168.0.101',
                              'Bla', 'Changing', 0), rows[1])
        listener = TestWikiChangeListener(self.env)
        self.assertEqual((page, 2, t2, 'Changing', 'kate', '192.168.0.101'),
                         listener.changed[0])
        # History is returned newest-first.
        page = WikiPage(self.env, 'TestPage')
        history = list(page.get_history())
        self.assertEqual(2, len(history))
        self.assertEqual((2, t2, 'kate', 'Changing', '192.168.0.101'),
                         history[0])
        self.assertEqual((1, t, 'joe', 'Testing', '::1'), history[1])
    def test_delete_page(self):
        # Deleting without a version removes every revision.
        self.env.db_transaction(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
            ('TestPage', 1, 42, 'joe', '::1', 'Bla bla', 'Testing', 0))
        page = WikiPage(self.env, 'TestPage')
        page.delete()
        self.assertEqual(False, page.exists)
        self.assertEqual([], self.env.db_query("""
            SELECT version, time, author, ipnr, text, comment, readonly
            FROM wiki WHERE name=%s
            """, ('TestPage',)))
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.deleted[0])
    def test_delete_page_version(self):
        # Deleting one of several versions keeps the page alive.
        self.env.db_transaction.executemany(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
            [('TestPage', 1, 42, 'joe', '::1', 'Bla bla', 'Testing', 0),
             ('TestPage', 2, 43, 'kate', '192.168.0.11', 'Bla', 'Changing', 0)])
        page = WikiPage(self.env, 'TestPage')
        page.delete(version=2)
        self.assertEqual(True, page.exists)
        self.assertEqual(
            [(1, 42, 'joe', '::1', 'Bla bla', 'Testing', 0)],
            self.env.db_query("""
                SELECT version, time, author, ipnr, text, comment, readonly
                FROM wiki WHERE name=%s
                """, ('TestPage',)))
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.deleted_version[0])
    def test_delete_page_last_version(self):
        # Deleting the only remaining version deletes the whole page and
        # fires the page-deleted (not version-deleted) notification.
        self.env.db_transaction(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
            ('TestPage', 1, 42, 'joe', '::1', 'Bla bla', 'Testing', 0))
        page = WikiPage(self.env, 'TestPage')
        page.delete(version=1)
        self.assertEqual(False, page.exists)
        self.assertEqual([], self.env.db_query("""
            SELECT version, time, author, ipnr, text, comment, readonly
            FROM wiki WHERE name=%s
            """, ('TestPage',)))
        listener = TestWikiChangeListener(self.env)
        self.assertEqual(page, listener.deleted[0])
    def test_rename_page(self):
        # Renaming moves the wiki rows and reparents attachments.
        data = (1, 42, 'joe', '::1', 'Bla bla', 'Testing', 0)
        self.env.db_transaction(
            "INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s,%s)",
            ('TestPage',) + data)
        attachment = Attachment(self.env, 'wiki', 'TestPage')
        attachment.insert('foo.txt', StringIO(), 0, 1)
        page = WikiPage(self.env, 'TestPage')
        page.rename('PageRenamed')
        self.assertEqual('PageRenamed', page.name)
        self.assertEqual([data], self.env.db_query("""
            SELECT version, time, author, ipnr, text, comment, readonly
            FROM wiki WHERE name=%s
            """, ('PageRenamed',)))
        attachments = Attachment.select(self.env, 'wiki', 'PageRenamed')
        self.assertEqual('foo.txt', attachments.next().filename)
        self.assertRaises(StopIteration, attachments.next)
        Attachment.delete_all(self.env, 'wiki', 'PageRenamed')
        # The old name must no longer resolve to a page.
        old_page = WikiPage(self.env, 'TestPage')
        self.assertEqual(False, old_page.exists)
        self.assertEqual([], self.env.db_query("""
            SELECT version, time, author, ipnr, text, comment, readonly
            FROM wiki WHERE name=%s
            """, ('TestPage',)))
        listener = TestWikiChangeListener(self.env)
        self.assertEqual((page, 'TestPage'), listener.renamed[0])
    def test_invalid_page_name(self):
        # Path-traversal and malformed hierarchy names are rejected both on
        # save of a new page and on rename of an existing one.
        invalid_names = ('../Page', 'Page/..', 'Page/////SubPage',
                         'Page/./SubPage', '/PagePrefix', 'PageSuffix/')
        for name in invalid_names:
            page = WikiPage(self.env)
            page.name = name
            page.text = 'Bla bla'
            t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
            self.assertRaises(TracError, page.save, 'joe', 'Testing', '::1', t)
        page = WikiPage(self.env)
        page.name = 'TestPage'
        page.text = 'Bla bla'
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        page.save('joe', 'Testing', '::1', t)
        for name in invalid_names:
            page = WikiPage(self.env, 'TestPage')
            self.assertRaises(TracError, page.rename, name)
class WikiResourceChangeListenerTestCase(unittest.TestCase):
    """Checks that wiki page create/change/rename/delete operations fire
    the generic resource change listener with the expected payload."""

    INITIAL_NAME = "Wiki page 1"
    INITIAL_TEXT = "some text"
    INITIAL_AUTHOR = "anAuthor"
    INITIAL_COMMENT = "some comment"
    INITIAL_REMOTE_ADDRESS = "::1"

    def setUp(self):
        self.env = EnvironmentStub(default_data=True)
        listener = TestResourceChangeListener(self.env)
        listener.resource_type = WikiPage
        listener.callback = self.listener_callback
        self.listener = listener

    def tearDown(self):
        self.env.reset_db()

    def test_change_listener_created(self):
        self._create_wiki_page(self.INITIAL_NAME)
        self.assertEqual('created', self.listener.action)
        self.assertTrue(isinstance(self.listener.resource, WikiPage))
        self.assertEqual(self.INITIAL_NAME, self.wiki_name)
        self.assertEqual(self.INITIAL_TEXT, self.wiki_text)

    def test_change_listener_text_changed(self):
        page = self._create_wiki_page(self.INITIAL_NAME)
        new_text = "some other text"
        page.text = new_text
        page.save("author1", "renamed_comment", "::2")
        self.assertEqual('changed', self.listener.action)
        self.assertTrue(isinstance(self.listener.resource, WikiPage))
        self.assertEqual(self.INITIAL_NAME, self.wiki_name)
        self.assertEqual(new_text, self.wiki_text)
        # Only the text differs, so old_values carries just the old text.
        self.assertEqual({"text": self.INITIAL_TEXT}, self.listener.old_values)

    def test_change_listener_renamed(self):
        page = self._create_wiki_page(self.INITIAL_NAME)
        new_name = "NewWikiName"
        page.rename(new_name)
        self.assertEqual('changed', self.listener.action)
        self.assertTrue(isinstance(self.listener.resource, WikiPage))
        self.assertEqual(new_name, self.wiki_name)
        self.assertEqual(self.INITIAL_TEXT, self.wiki_text)
        # A rename is reported as a change whose old_values holds the name.
        self.assertEqual({"name": self.INITIAL_NAME}, self.listener.old_values)

    def test_change_listener_deleted(self):
        page = self._create_wiki_page(self.INITIAL_NAME)
        page.delete()
        self.assertEqual('deleted', self.listener.action)
        self.assertTrue(isinstance(self.listener.resource, WikiPage))
        self.assertEqual(self.INITIAL_NAME, self.wiki_name)

    def _create_wiki_page(self, name=None):
        """Create and save one wiki page with the INITIAL_* fixture data."""
        page = WikiPage(self.env, name or self.INITIAL_NAME)
        page.text = self.INITIAL_TEXT
        page.save(self.INITIAL_AUTHOR, self.INITIAL_COMMENT,
                  self.INITIAL_REMOTE_ADDRESS)
        return page

    def listener_callback(self, action, resource, context, old_values=None):
        # Capture the notified page's state for the assertions above.
        self.wiki_name, self.wiki_text = resource.name, resource.text
def suite():
    """Aggregate every test case of this module into one test suite."""
    tests = unittest.TestSuite()
    for case in (WikiPageTestCase, WikiResourceChangeListenerTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
# Allow running this test module directly; `defaultTest` points unittest
# at the suite() aggregator above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')  # type of the deserialized response payload
# Optional per-call hook: receives (raw pipeline response, deserialized
# model, response headers) and its return value replaces the method result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FirewallRulesOperations:
    """FirewallRulesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.datalake.store.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: generated by AutoRest -- do not hand-edit; changes are lost on
    # regeneration (see the file header).
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_account(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs
    ) -> AsyncIterable["_models.FirewallRuleListResult"]:
        """Lists the Data Lake Store firewall rules within the specified Data Lake Store account.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FirewallRuleListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datalake.store.models.FirewallRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_account.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'accountName': self._serialize.url("account_name", account_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation pages: next_link is already a complete URL.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('FirewallRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules'}  # type: ignore
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        firewall_rule_name: str,
        parameters: "_models.CreateOrUpdateFirewallRuleParameters",
        **kwargs
    ) -> "_models.FirewallRule":
        """Creates or updates the specified firewall rule. During update, the firewall rule with the
        specified name will be replaced with this new firewall rule.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :param firewall_rule_name: The name of the firewall rule to create or update.
        :type firewall_rule_name: str
        :param parameters: Parameters supplied to create or update the firewall rule.
        :type parameters: ~azure.mgmt.datalake.store.models.CreateOrUpdateFirewallRuleParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallRule, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.store.models.FirewallRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'CreateOrUpdateFirewallRuleParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FirewallRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        firewall_rule_name: str,
        **kwargs
    ) -> "_models.FirewallRule":
        """Gets the specified Data Lake Store firewall rule.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :param firewall_rule_name: The name of the firewall rule to retrieve.
        :type firewall_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallRule, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.store.models.FirewallRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FirewallRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'}  # type: ignore
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        firewall_rule_name: str,
        parameters: Optional["_models.UpdateFirewallRuleParameters"] = None,
        **kwargs
    ) -> "_models.FirewallRule":
        """Updates the specified firewall rule.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :param firewall_rule_name: The name of the firewall rule to update.
        :type firewall_rule_name: str
        :param parameters: Parameters supplied to update the firewall rule.
        :type parameters: ~azure.mgmt.datalake.store.models.UpdateFirewallRuleParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallRule, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.store.models.FirewallRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # PATCH body is optional for this operation.
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'UpdateFirewallRuleParameters')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FirewallRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'}  # type: ignore
    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        firewall_rule_name: str,
        **kwargs
    ) -> None:
        """Deletes the specified firewall rule from the specified Data Lake Store account.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :param firewall_rule_name: The name of the firewall rule to delete.
        :type firewall_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 means the rule was already absent; both outcomes are success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'}  # type: ignore
|
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db import models
from django.db import transaction
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from satchless.process import InvalidData
from .forms import DeliveryForm
from ..checkout.forms import AnonymousEmailForm
from ..core.utils import BaseStep
from ..delivery import get_delivery_options_for_items
from ..userprofile.forms import AddressForm
from ..userprofile.models import Address, User
class BaseCheckoutStep(BaseStep):
    """Common behaviour for all checkout steps.

    Each step keeps its state in ``storage`` (a step-scoped dict persisted
    by the checkout) and knows how to copy that state onto an order.
    """

    def __init__(self, request, storage):
        super(BaseCheckoutStep, self).__init__(request)
        self.storage = storage

    @models.permalink
    def get_absolute_url(self):
        step_slug = str(self)
        return ('checkout:details', (), {'step': step_slug})

    def add_to_order(self, order):
        # Concrete steps must implement transferring their data to the order.
        raise NotImplementedError()
class BaseAddressStep(BaseCheckoutStep):
    """Checkout step that collects and validates a postal address."""
    template = 'checkout/address.html'
    def __init__(self, request, storage, address):
        # Build the main address form; for authenticated users also prepare
        # each saved address with its own read-only form for quick selection.
        super(BaseAddressStep, self).__init__(request, storage)
        self.address = address
        existing_selected = False
        address_form = AddressForm(request.POST or None, instance=self.address)
        if request.user.is_authenticated():
            addresses = list(request.user.addresses.all())
            # NOTE(review): the loop variable shadows the `address` parameter;
            # self.address already holds the original, so this is harmless
            # but confusing.
            for address in addresses:
                data = Address.objects.as_data(address)
                # Detached copy so the per-address form does not mutate the
                # saved record.
                instance = Address(**data)
                address.form = AddressForm(instance=instance)
                address.is_selected = Address.objects.are_identical(
                    address, self.address)
                if address.is_selected:
                    existing_selected = True
        else:
            addresses = []
        self.existing_selected = existing_selected
        self.forms = {'address': address_form}
        self.addresses = addresses
    def forms_are_valid(self):
        # Subclasses extend this with validation of their extra forms.
        address_form = self.forms['address']
        return address_form.is_valid()
    def validate(self):
        # satchless marks the step incomplete by raising InvalidData.
        try:
            self.address.clean_fields()
        except ValidationError as e:
            raise InvalidData(e.messages)
    def process(self, extra_context=None):
        # Expose form and saved-address data to the template.
        context = dict(extra_context or {})
        context['form'] = self.forms['address']
        context['addresses'] = self.addresses
        context['existing_address_selected'] = self.existing_selected
        return super(BaseAddressStep, self).process(extra_context=context)
class BillingAddressStep(BaseAddressStep):
    """Collects the billing address, and an e-mail address for guests."""
    template = 'checkout/billing.html'
    title = _('Billing Address')
    def __init__(self, request, storage):
        address_data = storage.get('address', {})
        address = Address(**address_data)
        skip = False
        # When nothing has been stored yet and the user has an unambiguous
        # address on file, pre-fill it and auto-complete the step.
        if not address_data and request.user.is_authenticated():
            if request.user.default_billing_address:
                address = request.user.default_billing_address
                skip = True
            elif request.user.addresses.count() == 1:
                address = request.user.addresses.all()[0].address
                skip = True
        super(BillingAddressStep, self).__init__(request, storage, address)
        if not request.user.is_authenticated():
            # Guests must additionally supply an e-mail address.
            self.anonymous_user_email = self.storage.get(
                'anonymous_user_email')
            initial = {'email': self.anonymous_user_email}
            self.forms['anonymous'] = AnonymousEmailForm(request.POST or None,
                                                         initial=initial)
        else:
            self.anonymous_user_email = ''
        if skip:
            # Persist the pre-filled data so validate() passes immediately.
            self.save()
    def __str__(self):
        # Slug used in checkout URLs and as this step's identifier.
        return 'billing-address'
    def forms_are_valid(self):
        # The guest e-mail form (when present) must validate as well; its
        # cleaned value is captured here as a side effect.
        forms_are_valid = super(BillingAddressStep, self).forms_are_valid()
        if 'anonymous' not in self.forms:
            return forms_are_valid
        anonymous_form = self.forms['anonymous']
        if forms_are_valid and anonymous_form.is_valid():
            self.anonymous_user_email = anonymous_form.cleaned_data['email']
            return True
        return False
    def save(self):
        # Serialize the model instance into plain data for session storage.
        self.storage['anonymous_user_email'] = self.anonymous_user_email
        self.storage['address'] = Address.objects.as_data(self.address)
    def add_to_order(self, order):
        self.address.save()
        order.anonymous_user_email = self.anonymous_user_email
        order.billing_address = self.address
        if order.user:
            # Remember the address in the user's address book.
            User.objects.store_address(order.user, self.address, billing=True)
    def validate(self):
        super(BillingAddressStep, self).validate()
        # Guests cannot proceed without an e-mail address.
        if 'anonymous' in self.forms and not self.anonymous_user_email:
            raise InvalidData()
class ShippingStep(BaseAddressStep):
    """Collects the shipping address and a delivery method for the cart."""
    template = 'checkout/shipping.html'
    title = _('Shipping Address')
    def __init__(self, request, storage, cart,
                 default_address=None):
        self.cart = cart
        address_data = storage.get('address', {})
        if not address_data and default_address:
            address = default_address
        else:
            address = Address(**address_data)
        super(ShippingStep, self).__init__(request, storage, address)
        # Delivery options depend on the cart contents and the address.
        delivery_choices = list(
            (m.name, m) for m in get_delivery_options_for_items(
                self.cart, address=address))
        # Re-select the previously chosen method by name, if still offered.
        selected_method_name = storage.get('delivery_method')
        selected_method = None
        for method_name, method in delivery_choices:
            if method_name == selected_method_name:
                selected_method = method
                break
        if selected_method is None:
            # TODO: find cheapest not first
            # NOTE(review): assumes at least one delivery option exists;
            # raises IndexError otherwise -- confirm upstream guarantees.
            selected_method_name, selected_method = delivery_choices[0]
        self.delivery_method = selected_method
        self.forms['delivery'] = DeliveryForm(
            delivery_choices, request.POST or None,
            initial={'method': selected_method_name})
    def __str__(self):
        # Slug used in checkout URLs and as this step's identifier.
        return 'shipping-address'
    def save(self):
        # Store only the method *name*; the method object is rebuilt from
        # the cart on the next request.
        delivery_form = self.forms['delivery']
        self.storage['address'] = Address.objects.as_data(self.address)
        delivery_method = delivery_form.cleaned_data['method']
        self.storage['delivery_method'] = delivery_method
    def validate(self):
        super(ShippingStep, self).validate()
        if 'delivery_method' not in self.storage:
            raise InvalidData()
    def forms_are_valid(self):
        base_forms_are_valid = super(ShippingStep, self).forms_are_valid()
        delivery_form = self.forms['delivery']
        if base_forms_are_valid and delivery_form.is_valid():
            return True
        return False
    def add_to_order(self, order):
        self.address.save()
        order.shipping_method = self.delivery_method.name
        order.shipping_address = self.address
        if order.user:
            # Remember the address in the user's address book.
            User.objects.store_address(order.user, self.address, shipping=True)
    def process(self, extra_context=None):
        context = dict(extra_context or {})
        context['delivery_form'] = self.forms['delivery']
        return super(ShippingStep, self).process(extra_context=context)
class SummaryStep(BaseCheckoutStep):
    """Final checkout step: shows the summary and places the order."""
    template = 'checkout/summary.html'
    title = _('Summary')
    def __init__(self, request, storage, checkout):
        self.checkout = checkout
        super(SummaryStep, self).__init__(request, storage)
    def __str__(self):
        return 'summary'
    def process(self, extra_context=None):
        # A falsy response from the base step means the form was submitted
        # successfully, so the order is created atomically and the user is
        # redirected to payment.
        context = dict(extra_context or {})
        context['all_steps_valid'] = self.forms_are_valid()
        response = super(SummaryStep, self).process(context)
        if not response:
            with transaction.atomic():
                order = self.checkout.create_order()
                order.create_history_entry()
                order.send_confirmation_email()
            return redirect('order:payment', token=order.token)
        return response
    def validate(self):
        # The summary step is never "complete": reaching it ends the
        # checkout by creating the order instead.
        raise InvalidData()
    def forms_are_valid(self):
        # All previous steps are valid iff this step is the next incomplete one.
        next_step = self.checkout.get_next_step()
        return next_step == self
    def save(self):
        # Nothing to persist for the summary step.
        pass
    def add_to_order(self, order):
        # Partition the cart into delivery groups, pricing shipping per
        # partition, then clear the checkout session data.
        order.save()
        if self.checkout.is_shipping_required():
            method = self.checkout.shipping.delivery_method
        else:
            method = None
        for partition in self.checkout.cart.partition():
            shipping_required = partition.is_shipping_required()
            if shipping_required and method:
                shipping_price = method.get_delivery_total(partition)
            else:
                shipping_price = 0
            group = order.groups.create(
                shipping_required=shipping_required,
                shipping_price=shipping_price)
            group.add_items_from_partition(partition)
        self.checkout.clear_storage()
|
|
#!/usr/bin/python
import binascii, re, json, copy, sys
from bitcoin.main import *
### Hex to bin converter and vice versa for objects
def json_is_base(obj, base):
    """Return True when every string reachable inside *obj* (recursing
    through lists and dicts) uses only characters of *base*'s alphabet.

    Numbers and None always count as valid.
    """
    alpha = get_code_string(base)
    if isinstance(obj, (str, unicode)):
        return all(ch in alpha for ch in obj)
    if isinstance(obj, (int, float, long)) or obj is None:
        return True
    if isinstance(obj, list):
        return all(json_is_base(item, base) for item in obj)
    # Any other object is treated as a mapping over its keys.
    return all(json_is_base(obj[key], base) for key in obj)
def json_changebase(obj, changer):
    """Recursively apply *changer* to every string in *obj*.

    Used to flip a whole transaction structure between hex and binary
    encodings.  Numbers and None pass through untouched.  Python 2 only
    (``unicode``/``long`` builtins).
    """
    if isinstance(obj, (str, unicode)):
        return changer(obj)
    elif isinstance(obj, (int, float, long)) or obj is None:
        return obj
    elif isinstance(obj, list):
        return [json_changebase(x, changer) for x in obj]
    # Fall through: dict-like object, convert each value.
    return dict((x, json_changebase(obj[x], changer)) for x in obj)
# Transaction serialization and deserialization
def deserialize(tx):
    """Parse a serialized Bitcoin transaction into a dict.

    Accepts either a hex string (all binary fields in the result come
    back hex-encoded) or a raw binary string.  Returns a dict with keys
    "version", "ins", "outs" and "locktime".
    """
    if re.match('^[0-9a-fA-F]*$', tx):
        # Hex input: deserialize the binary form, then hex-encode every
        # binary field of the result.
        return json_changebase(deserialize(binascii.unhexlify(tx)),
                               lambda x: binascii.hexlify(x))
    # http://stackoverflow.com/questions/4851463/python-closure-write-to-variable-in-parent-scope
    # Python's scoping rules are demented, requiring me to make pos an object
    # so that it is call-by-reference
    pos = [0]
    def read_as_int(bytez):
        # Little-endian integer of *bytez* bytes.
        pos[0] += bytez
        return decode(tx[pos[0]-bytez:pos[0]][::-1], 256)
    def read_var_int():
        # Bitcoin varint: one byte, or a marker byte (253/254/255)
        # followed by a 2/4/8-byte little-endian integer.
        pos[0] += 1
        if ord(tx[pos[0]-1]) < 253:
            return ord(tx[pos[0]-1])
        return read_as_int(pow(2, ord(tx[pos[0]-1]) - 252))
    def read_bytes(bytez):
        pos[0] += bytez
        return tx[pos[0]-bytez:pos[0]]
    def read_var_string():
        # Varint length prefix followed by that many raw bytes.
        size = read_var_int()
        return read_bytes(size)
    obj = {"ins": [], "outs": []}
    obj["version"] = read_as_int(4)
    ins = read_var_int()
    for i in range(ins):
        obj["ins"].append({
            "outpoint": {
                # txids are displayed byte-reversed relative to the wire.
                "hash": read_bytes(32)[::-1],
                "index": read_as_int(4)
            },
            "script": read_var_string(),
            "sequence": read_as_int(4)
        })
    outs = read_var_int()
    for i in range(outs):
        obj["outs"].append({
            "value": read_as_int(8),
            "script": read_var_string()
        })
    obj["locktime"] = read_as_int(4)
    return obj
def serialize(txobj):
    """Serialize a transaction dict back to the Bitcoin wire format.

    If every string in *txobj* is hex (see json_is_base), the fields are
    converted to binary first and the result is returned hex-encoded;
    otherwise a raw binary string is returned.
    """
    o = []
    if json_is_base(txobj, 16):
        return binascii.hexlify(serialize(json_changebase(txobj,
                                lambda x: binascii.unhexlify(x))))
    o.append(encode(txobj["version"], 256, 4)[::-1])
    o.append(num_to_var_int(len(txobj["ins"])))
    for inp in txobj["ins"]:
        # Hashes go on the wire byte-reversed; integers little-endian.
        o.append(inp["outpoint"]["hash"][::-1])
        o.append(encode(inp["outpoint"]["index"], 256, 4)[::-1])
        o.append(num_to_var_int(len(inp["script"]))+inp["script"])
        o.append(encode(inp["sequence"], 256, 4)[::-1])
    o.append(num_to_var_int(len(txobj["outs"])))
    for out in txobj["outs"]:
        o.append(encode(out["value"], 256, 8)[::-1])
        o.append(num_to_var_int(len(out["script"]))+out["script"])
    o.append(encode(txobj["locktime"], 256, 4)[::-1])
    return ''.join(o)
# Hashing transactions for signing
# Signature-hash type flags: appended (little-endian, 4 bytes) to the
# serialized transaction before double-SHA256 hashing, and as a single
# trailing byte on the DER signature.
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
# this works like SIGHASH_ANYONECANPAY | SIGHASH_ALL, might as well make it explicit while
# we fix the constant
SIGHASH_ANYONECANPAY = 0x81
def signature_form(tx, i, script, hashcode=SIGHASH_ALL):
    """Return the modified transaction that is hashed and signed for input *i*.

    Every input script is blanked except input *i*, which receives
    *script* (the previous output's scriptPubKey, or the redeem script).
    The outputs are then adjusted according to *hashcode*:
      - SIGHASH_NONE: no outputs are committed to;
      - SIGHASH_SINGLE: only outputs up to the input count are kept and
        the ones before the last are blanked (value 2**64-1, empty
        script), per the protocol's signature-hashing rules;
      - SIGHASH_ANYONECANPAY: only input *i* is committed to.
    A hex/binary string yields a serialized result; a dict yields a dict.
    """
    i, hashcode = int(i), int(hashcode)
    if isinstance(tx, (str, unicode)):
        return serialize(signature_form(deserialize(tx), i, script, hashcode))
    newtx = copy.deepcopy(tx)
    for inp in newtx["ins"]:
        inp["script"] = ""
    newtx["ins"][i]["script"] = script
    if hashcode == SIGHASH_NONE:
        newtx["outs"] = []
    elif hashcode == SIGHASH_SINGLE:
        newtx["outs"] = newtx["outs"][:len(newtx["ins"])]
        # BUG FIX: the previous code did `for out in range(...)` and then
        # assigned `out.value` / `out.script` on the loop *integers*,
        # which raises AttributeError and never blanked the outputs.
        # Blank the preceding output dicts instead.
        for out in newtx["outs"][:len(newtx["ins"]) - 1]:
            out["value"] = 2**64 - 1
            out["script"] = ""
    elif hashcode == SIGHASH_ANYONECANPAY:
        newtx["ins"] = [newtx["ins"][i]]
    else:
        # SIGHASH_ALL: commit to everything, nothing more to strip.
        pass
    return newtx
# Making the actual signatures
def der_encode_sig(v, r, s):
    """DER-encode an ECDSA signature ``(r, s)`` as a hex string.

    *v* (the recovery id) is ignored -- DER carries only r and s.
    NOTE(review): the ``>= 2**255`` tests approximate "leading byte has
    its high bit set"; shorter r/s values with a high leading bit would
    miss the required 0x00 padding -- confirm against a strict DER
    parser.  Python 2 only (integer ``/`` division).
    """
    b1, b2 = binascii.hexlify(encode(r, 256)), binascii.hexlify(encode(s, 256))
    if r >= 2**255:
        # High bit set: prefix 0x00 so the INTEGER is not read as negative.
        b1 = '00' + b1
    if s >= 2**255:
        b2 = '00' + b2
    left = '02'+encode(len(b1)/2, 16, 2)+b1
    right = '02'+encode(len(b2)/2, 16, 2)+b2
    # 0x30 = DER SEQUENCE tag; lengths are in bytes (hex chars / 2).
    return '30'+encode(len(left+right)/2, 16, 2)+left+right
def der_decode_sig(sig):
    """Decode a hex DER signature into ``(None, r, s)``.

    Expected layout (hex-char offsets):
    ``30 <total-len> 02 <r-len> <r...> 02 <s-len> <s...>``.
    The recovery id is not stored in DER, hence None in the first slot.
    """
    leftlen = decode(sig[6:8], 16)*2
    left = sig[8:8+leftlen]
    rightlen = decode(sig[10+leftlen:12+leftlen], 16)*2
    right = sig[12+leftlen:12+leftlen+rightlen]
    return (None, decode(left, 16), decode(right, 16))
def txhash(tx, hashcode=None):
    """Double-SHA256 a transaction; returns hex in both modes.

    Without *hashcode*: the display txid (digest byte-reversed).
    With *hashcode*: the signature hash -- the 4-byte little-endian
    hashcode is appended before hashing and the digest is NOT reversed.
    """
    if re.match('^[0-9a-fA-F]*$', tx):
        tx = changebase(tx, 16, 256)
    if hashcode:
        return dbl_sha256(tx + encode(int(hashcode), 256, 4)[::-1])
    else:
        return binascii.hexlify(bin_dbl_sha256(tx)[::-1])
def bin_txhash(tx, hashcode=None):
    """Binary form of txhash(); used as the ECDSA message digest."""
    hex_digest = txhash(tx, hashcode)
    return binascii.unhexlify(hex_digest)
def ecdsa_tx_sign(tx, priv, hashcode=SIGHASH_ALL):
    """Sign *tx*'s sighash with *priv*; returns hex DER + 1-byte hashcode."""
    rawsig = ecdsa_raw_sign(bin_txhash(tx, hashcode), priv)
    return der_encode_sig(*rawsig)+encode(hashcode, 16, 2)
def ecdsa_tx_verify(tx, sig, pub, hashcode=SIGHASH_ALL):
    """Check a DER *sig* over *tx*'s sighash against public key *pub*."""
    return ecdsa_raw_verify(bin_txhash(tx, hashcode), der_decode_sig(sig), pub)
def ecdsa_tx_recover(tx, sig, hashcode=SIGHASH_ALL):
    """Return the two candidate public keys (hex) that could have made *sig*.

    DER does not carry the recovery bit, so both parities (0 and 1) are
    recovered and returned as a pair.
    """
    z = bin_txhash(tx, hashcode)
    _, r, s = der_decode_sig(sig)
    left = ecdsa_raw_recover(z, (0, r, s))
    right = ecdsa_raw_recover(z, (1, r, s))
    return (encode_pubkey(left, 'hex'), encode_pubkey(right, 'hex'))
# Scripts
def mk_pubkey_script(addr):
    """P2PKH output script (hex) for a base58check address.

    76a914 <20-byte hash> 88ac = DUP HASH160 <push 20> ... EQUALVERIFY CHECKSIG.
    """
    # Keep the auxiliary functions around for altcoins' sake
    return '76a914' + b58check_to_hex(addr) + '88ac'
def mk_scripthash_script(addr):
    """P2SH output script (hex) for a base58check script-hash address.

    a914 <20-byte hash> 87 = HASH160 <push 20> ... EQUAL.
    """
    return 'a914' + b58check_to_hex(addr) + '87'
# Address representation to output script
def address_to_script(addr):
    """Map a base58check address to its output script (hex).

    Addresses beginning with '3' (mainnet P2SH) or '2' (testnet P2SH)
    get a script-hash script; anything else is treated as P2PKH.
    """
    if addr[0] in ('3', '2'):
        return mk_scripthash_script(addr)
    return mk_pubkey_script(addr)
# Output script to address representation
def script_to_address(script, vbyte=0):
    """Convert an output script (hex or binary) to its address.

    *vbyte* is the pubkey-hash version byte (0 = mainnet, 111 = testnet).
    Python 2 only: compares binary strings against '\\x..' literals.
    """
    if re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)
    # 25-byte DUP HASH160 <20> ... EQUALVERIFY CHECKSIG pattern -> P2PKH.
    if script[:3] == '\x76\xa9\x14' and script[-2:] == '\x88\xac' and len(script) == 25:
        return bin_to_b58check(script[3:-2], vbyte)  # pubkey hash addresses
    else:
        if vbyte == 111:
            # Testnet
            scripthash_byte = 196
        else:
            scripthash_byte = 5
        # BIP0016 scripthash addresses
        return bin_to_b58check(script[2:-1], scripthash_byte)
def p2sh_scriptaddr(script, magicbyte=5):
    """HASH160 a redeem *script* into its P2SH address (magicbyte 5 = mainnet)."""
    if re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)
    return hex_to_b58check(hash160(script), magicbyte)
# Historical alias kept for callers of the old name.
scriptaddr = p2sh_scriptaddr
def deserialize_script(script):
    """Parse a serialized script into a list of opcodes and push data.

    Hex input yields hex-encoded push data; binary yields binary.
    Entries are: None for OP_0, strings for pushed data, and ints for
    other opcodes (the small-number opcodes are stored as their values).
    """
    if re.match('^[0-9a-fA-F]*$', script):
        return json_changebase(deserialize_script(binascii.unhexlify(script)),
                               lambda x: binascii.hexlify(x))
    out, pos = [], 0
    while pos < len(script):
        code = ord(script[pos])
        if code == 0:
            # OP_0 / empty push.
            out.append(None)
            pos += 1
        elif code <= 75:
            # Direct push of `code` bytes.
            out.append(script[pos+1:pos+1+code])
            pos += 1 + code
        elif code <= 78:
            # OP_PUSHDATA1/2/4 (opcodes 76-78): the push size is stored
            # little-endian in 1/2/4 bytes; the reversed slice reads it
            # back big-endian for decode().
            szsz = pow(2, code - 76)
            sz = decode(script[pos+szsz: pos:-1], 256)
            out.append(script[pos + 1 + szsz:pos + 1 + szsz + sz])
            pos += 1 + szsz + sz
        elif code <= 96:
            # Small-number opcode range: store the encoded value itself.
            out.append(code - 80)
            pos += 1
        else:
            # Any other opcode: keep the raw opcode number.
            out.append(code)
            pos += 1
    return out
def serialize_script_unit(unit):
    """Serialize one script element (int opcode, None, or push data)."""
    if unit is None:
        # OP_0 / empty push.
        return '\x00'
    if isinstance(unit, int):
        # Values below 16 map to the small-number opcodes (0x50 + n);
        # anything larger is taken to be a raw opcode number.
        return chr(unit + 80) if unit < 16 else chr(unit)
    # Anything else is push data: pick the shortest push encoding.
    size = len(unit)
    if size <= 75:
        return chr(size) + unit
    if size < 256:
        return chr(76) + chr(size) + unit
    if size < 65536:
        return chr(77) + encode(size, 256, 2)[::-1] + unit
    return chr(78) + encode(size, 256, 4)[::-1] + unit
def serialize_script(script):
    """Serialize a list of script elements (see deserialize_script).

    If every string element is hex, the result is returned hex-encoded;
    otherwise a binary string is returned.
    """
    if json_is_base(script, 16):
        return binascii.hexlify(serialize_script(json_changebase(script,
                                lambda x: binascii.unhexlify(x))))
    return ''.join(map(serialize_script_unit, script))
def mk_multisig_script(*args):  # [pubs],k or pub1,pub2...pub[n],k
    """Build a k-of-n CHECKMULTISIG redeem script from public keys.

    Accepts either ([pubkeys], k) or the pubkeys followed by k as
    positional args; pubkeys are recognised by string length >= 32.
    174 is the OP_CHECKMULTISIG opcode; len(pubs) becomes the n count.
    """
    if isinstance(args[0], list):
        pubs, k = args[0], int(args[1])
    else:
        pubs = list(filter(lambda x: len(str(x)) >= 32, args))
        # k is the first argument after the run of pubkeys.
        k = int(args[len(pubs)])
    return serialize_script([k]+pubs+[len(pubs), 174])
# Signing and verifying
def verify_tx_input(tx, i, script, sig, pub):
    """Verify signature *sig* for input *i* of *tx* against *script*.

    The SIGHASH type is recovered from the signature's trailing byte,
    and the transaction is rebuilt into its signature form before the
    ECDSA check.
    """
    if re.match('^[0-9a-fA-F]*$', tx):
        tx = binascii.unhexlify(tx)
    if re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)
    if not re.match('^[0-9a-fA-F]*$', sig):
        sig = binascii.hexlify(sig)
    # Last hex byte of the signature encodes the SIGHASH type.
    hashcode = decode(sig[-2:], 16)
    modtx = signature_form(tx, int(i), script, hashcode)
    return ecdsa_tx_verify(modtx, sig, pub, hashcode)
def sign(tx, i, priv, hashcode=SIGHASH_ALL):
    """Sign input *i* of *tx* (assumed P2PKH) with private key *priv*.

    The input's scriptSig is set to ``<sig> <pubkey>``.  Hex input
    yields hex output; binary input yields binary output.
    """
    i = int(i)
    if not re.match('^[0-9a-fA-F]*$', tx):
        # BUG FIX: the hex-recursion previously dropped *hashcode*,
        # silently re-signing binary transactions with SIGHASH_ALL.
        return binascii.unhexlify(sign(binascii.hexlify(tx), i, priv, hashcode))
    if len(priv) <= 33:
        # Raw binary private key: hex-encode it first.
        priv = binascii.hexlify(priv)
    pub = privkey_to_pubkey(priv)
    address = pubkey_to_address(pub)
    # Sign against the previous output's implied P2PKH scriptPubKey.
    signing_tx = signature_form(tx, i, mk_pubkey_script(address), hashcode)
    sig = ecdsa_tx_sign(signing_tx, priv, hashcode)
    txobj = deserialize(tx)
    txobj["ins"][i]["script"] = serialize_script([sig, pub])
    return serialize(txobj)
def signall(tx, priv):
    """Sign every input of *tx*.

    *priv* is either a single key used for all inputs, or a dict
    mapping 'txinhash:txinidx' to the key for that specific input.
    """
    if isinstance(priv, dict):
        for idx, txin in enumerate(deserialize(tx)["ins"]):
            lookup = "%s:%d" % (txin["outpoint"]["hash"],
                                txin["outpoint"]["index"])
            tx = sign(tx, idx, priv[lookup])
    else:
        for idx in range(len(deserialize(tx)["ins"])):
            tx = sign(tx, idx, priv)
    return tx
def multisign(tx, i, script, pk, hashcode=SIGHASH_ALL):
    """Produce one multisig signature for input *i* against redeem *script*.

    Returns the hex DER signature (with trailing hashcode byte); combine
    several of these with apply_multisignatures().
    """
    if re.match('^[0-9a-fA-F]*$', tx):
        tx = binascii.unhexlify(tx)
    if re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)
    modtx = signature_form(tx, i, script, hashcode)
    return ecdsa_tx_sign(modtx, pk, hashcode)
def apply_multisignatures(*args):
    """Attach multisig signatures to input *i* of *tx*.

    Call as ``(tx, i, script, sigs)`` or ``(tx, i, script, sig1, sig2, ...)``.
    The scriptSig becomes ``OP_0 <sigs...> <serialized redeem script>``
    (the leading None/OP_0 absorbs the CHECKMULTISIG off-by-one pop).
    """
    # tx,i,script,sigs OR tx,i,script,sig1,sig2...,sig[n]
    tx, i, script = args[0], int(args[1]), args[2]
    sigs = args[3] if isinstance(args[3], list) else list(args[3:])
    if re.match('^[0-9a-fA-F]*$', script):
        script = binascii.unhexlify(script)
    # Hex DER signatures start with '30' (the SEQUENCE tag byte 0x30).
    sigs = [binascii.unhexlify(x) if x[:2] == '30' else x for x in sigs]
    if re.match('^[0-9a-fA-F]*$', tx):
        return binascii.hexlify(apply_multisignatures(binascii.unhexlify(tx), i, script, sigs))
    txobj = deserialize(tx)
    txobj["ins"][i]["script"] = serialize_script([None]+sigs+[script])
    return serialize(txobj)
def is_inp(arg):
    """Heuristic: dicts fetched from an INSIGHT server describe an input
    when they carry the 'confirmationsFromCache' field."""
    marker = "confirmationsFromCache"
    return marker in arg
def mktx(*args):
    """Build an unsigned transaction from inputs and outputs.

    Arguments may be lists or individual items in any order; anything
    recognised by is_inp() is treated as an input.  Inputs need 'txid'
    and 'vout'.  Outputs are dicts with 'value' plus 'address' or
    'script', or strings 'address:value' / 'scripthex:value'.  Returns
    the serialized transaction.  Python 2 only (``unicode`` builtin).
    """
    # [in0, in1...],[out0, out1...] or in0, in1 ... out0 out1 ...
    ins, outs = [], []
    for arg in args:
        if isinstance(arg, list):
            for a in arg: (ins if is_inp(a) else outs).append(a)
        else:
            (ins if is_inp(arg) else outs).append(arg)
    txobj = {"locktime": 0, "version": 1, "ins": [], "outs": []}
    for i in ins:
        txobj["ins"].append({
            "outpoint": {"hash": i['txid'], "index": int(i['vout'])},
            "script": "",
            # Final sequence number (0xffffffff).
            "sequence": 4294967295
        })
    for o in outs:
        if isinstance(o, (str, unicode)):
            # 'destination:value' shorthand; hex destinations are raw
            # scripts, anything else is a base58 address.
            addr = o[:o.find(':')]
            val = int(o[o.find(':')+1:])
            o = {}
            if re.match('^[0-9a-fA-F]*$', addr):
                o["script"] = addr
            else:
                o["address"] = addr
            o["value"] = val
        outobj = {}
        if "address" in o:
            outobj["script"] = address_to_script(o["address"])
        elif "script" in o:
            outobj["script"] = o["script"]
        else:
            raise Exception("Could not find 'address' or 'script' in output.")
        outobj["value"] = o["value"]
        txobj["outs"].append(outobj)
    return serialize(txobj)
def select(unspent, value):
    """Pick unspent outputs totalling at least *value*.

    Prefers the single smallest output that covers the amount on its
    own; otherwise accumulates outputs from largest to smallest.
    Raises if the whole set cannot cover *value*.
    """
    target = int(value)
    covering = sorted((u for u in unspent if u["value"] >= target),
                      key=lambda u: u["value"])
    if covering:
        return [covering[0]]
    smaller = sorted((u for u in unspent if u["value"] < target),
                     key=lambda u: -u["value"])
    picked, total = [], 0
    for utxo in smaller:
        if total >= target:
            break
        picked.append(utxo)
        total += utxo["value"]
    if total < target:
        raise Exception("Not enough funds")
    return picked
# Only takes inputs of the form { "output": blah, "value": foo }
def mksend(*args):
    """Like mktx() but with automatic change and fee handling.

    Call as ``mksend(ins/outs..., change_address, fee)``.  Raises if the
    inputs cannot cover outputs plus fee; surplus above the 5430-satoshi
    threshold is returned to *change*.  Python 2 only (``unicode``).
    """
    argz, change, fee = args[:-2], args[-2], int(args[-1])
    ins, outs = [], []
    for arg in argz:
        if isinstance(arg, list):
            for a in arg:
                (ins if is_inp(a) else outs).append(a)
        else:
            (ins if is_inp(arg) else outs).append(arg)
    isum = sum([i["value"] for i in ins])
    osum, outputs2 = 0, []
    for o in outs:
        if isinstance(o, (str, unicode)):
            # 'address:value' shorthand.
            o2 = {
                "address": o[:o.find(':')],
                "value": int(o[o.find(':')+1:])
            }
        else:
            o2 = o
        outputs2.append(o2)
        osum += o2["value"]
    if isum < osum+fee:
        raise Exception("Not enough money")
    elif isum > osum+fee+5430:
        # Surplus above the 5430-satoshi threshold goes back as change.
        outputs2 += [{"address": change, "value": isum-osum-fee}]
    return mktx(ins, outputs2)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Iterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
class IteratorTest(test.TestCase, parameterized.TestCase):
  @test_util.deprecated_graph_mode_only
  def testNoGradients(self):
    """get_next() must not propagate gradients to the dataset's inputs."""
    component = constant_op.constant([1.])
    side = constant_op.constant(0.)
    add = lambda x: x + side
    dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
    value = dataset_ops.make_one_shot_iterator(dataset).get_next()
    self.assertIsNone(gradients_impl.gradients(value, component)[0])
    self.assertIsNone(gradients_impl.gradients(value, side)[0])
    self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
  @test_util.deprecated_graph_mode_only
  def testCapturingStateInOneShotRaisesException(self):
    """One-shot iterators reject datasets that capture stateful objects."""
    var = variables.Variable(37.0, name="myvar")
    dataset = (
        dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
        .map(lambda x: x + var))
    # The error message should name the offending variable ("myvar").
    with self.assertRaisesRegexp(
        ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
        "datasets that capture stateful objects.+myvar"):
      dataset_ops.make_one_shot_iterator(dataset)
  @test_util.deprecated_graph_mode_only
  def testOneShotIterator(self):
    """A one-shot iterator yields every mapped element, then goes out of range."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = dataset_ops.make_one_shot_iterator(
        dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
        .repeat(14))
    get_next = iterator.get_next()
    # Static shapes: slicing drops the leading (batch) dimension.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.cached_session() as sess:
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  @test_util.deprecated_graph_mode_only
  def testOneShotIteratorCaptureByValue(self):
    """Same as testOneShotIterator, but with pre-converted captured tensors."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    # Convert to tensors up front so the dataset captures tensor objects.
    tensor_components = tuple([ops.convert_to_tensor(c) for c in components])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = dataset_ops.make_one_shot_iterator(
        dataset_ops.Dataset.from_tensor_slices(tensor_components)
        .map(_map_fn).repeat(14))
    get_next = iterator.get_next()
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.cached_session() as sess:
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testOneShotIteratorInsideContainer(self):
    """One-shot iterator resources are isolated per ops.container."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def within_container():

      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)

      iterator = dataset_ops.make_one_shot_iterator(
          dataset_ops.Dataset.from_tensor_slices(components)
          .map(_map_fn).repeat(14))
      return iterator.get_next()

    server = server_lib.Server.create_local_server()
    # Create two iterators within unique containers, and run them to
    # make sure that the resources aren't shared.
    #
    # The test below would fail if cname were the same across both
    # sessions.
    for j in range(2):
      with session.Session(server.target) as sess:
        cname = "iteration%d" % j
        with ops.container(cname):
          get_next = within_container()
        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
  @test_util.deprecated_graph_mode_only
  def testOneShotIteratorNonBlocking(self):
    """One-shot iterator initialization must not deadlock or block threads."""
    dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    next_element = iterator.get_next()
    # Create a session with a single thread to ensure that the
    # one-shot iterator initializer does not deadlock.
    config = config_pb2.ConfigProto(
        inter_op_parallelism_threads=1, use_per_session_threads=True)
    with session.Session(config=config) as sess:
      self.assertAllEqual([1, 4, 9], sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
    # Test with multiple threads invoking the one-shot iterator concurrently.
    with session.Session(config=config) as sess:
      results = []

      def consumer_thread():
        try:
          results.append(sess.run(next_element))
        except errors.OutOfRangeError:
          results.append(None)

      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      # Exactly one thread should receive the element; all others hit
      # end-of-sequence.
      self.assertEqual(num_threads, len(results))
      self.assertEqual(num_threads - 1,
                       len([None for r in results if r is None]))
      self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
  @test_util.deprecated_graph_mode_only
  def testOneShotIteratorInitializerFails(self):
    """Initialization errors surface on every get_next(), from any thread."""
    # Define a dataset whose initialization will always fail.
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.check_numerics(
            constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    next_element = iterator.get_next()
    with self.cached_session() as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)
      # Test that subsequent attempts to use the iterator also fail.
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)
    with self.cached_session() as sess:

      def consumer_thread():
        with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
          sess.run(next_element)

      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
  @test_util.deprecated_graph_mode_only
  def testSimpleSharedResource(self):
    """Iterators with a shared_name share state across sessions and graphs."""
    components = (np.array(1, dtype=np.int64),
                  np.array([1, 2, 3], dtype=np.int64),
                  np.array(37.0, dtype=np.float64))
    server = server_lib.Server.create_local_server()
    # Create two non-overlapping sessions that share the same iterator
    # resource on the same server, and verify that an action of the
    # first session (initializing the iterator) is visible in the
    # second session.
    with ops.Graph().as_default():
      iterator = (
          dataset_ops.Dataset.from_tensors(components)
          .map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
              shared_name="shared_iterator"))
      init_op = iterator.initializer
      get_next = iterator.get_next()
      with session.Session(server.target) as sess:
        sess.run(init_op)
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
        # Re-initialize the iterator in the first session.
        sess.run(init_op)
    with ops.Graph().as_default():
      # Re-define the iterator manually, without defining any of the
      # functions in this graph, to ensure that we are not
      # accidentally redefining functions with the same names in the
      # new graph.
      iterator = iterator_ops.Iterator.from_structure(
          shared_name="shared_iterator",
          output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
          output_shapes=([], [3], []))
      get_next = iterator.get_next()
      with session.Session(server.target) as sess:
        # Use the iterator without re-initializing in the second session.
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
  @test_util.deprecated_graph_mode_only
  def testNotInitializedError(self):
    """Using an initializable iterator before its initializer runs fails."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_tensors(components))
    get_next = iterator.get_next()
    with self.cached_session() as sess:
      with self.assertRaisesRegexp(errors.FailedPreconditionError,
                                   "iterator has not been initialized"):
        sess.run(get_next)
  @test_util.deprecated_graph_mode_only
  def testReinitializableIterator(self):
    """A structure-based iterator can be re-initialized with other datasets."""
    dataset_3 = dataset_ops.Dataset.from_tensors(
        constant_op.constant([1, 2, 3]))
    dataset_4 = dataset_ops.Dataset.from_tensors(
        constant_op.constant([4, 5, 6, 7]))
    iterator = iterator_ops.Iterator.from_structure(dataset_3.output_types,
                                                    [None])
    dataset_3_init_op = iterator.make_initializer(dataset_3)
    dataset_4_init_op = iterator.make_initializer(dataset_4)
    get_next = iterator.get_next()
    # Both datasets are compatible with the iterator's [None] shape.
    self.assertEqual(dataset_3.output_types, iterator.output_types)
    self.assertEqual(dataset_4.output_types, iterator.output_types)
    self.assertEqual(
        [None], dataset_ops.get_legacy_output_shapes(iterator).as_list())
    with self.cached_session() as sess:
      # The iterator is initially uninitialized.
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(get_next)
      # Initialize with one dataset.
      sess.run(dataset_3_init_op)
      self.assertAllEqual([1, 2, 3], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Initialize with a different dataset.
      sess.run(dataset_4_init_op)
      self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Reinitialize with the first dataset.
      sess.run(dataset_3_init_op)
      self.assertAllEqual([1, 2, 3], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  @test_util.deprecated_graph_mode_only
  def testReinitializableIteratorWithFunctions(self):
    """Re-initialization also works for generator-backed datasets."""

    def g():
      for i in range(10):
        yield i

    iterator = iterator_ops.Iterator.from_structure(dtypes.int64, [])
    next_element = iterator.get_next()
    with self.cached_session() as sess:
      dataset_1 = dataset_ops.Dataset.from_generator(
          g, output_types=dtypes.int64)
      sess.run(iterator.make_initializer(dataset_1))
      for expected in range(10):
        self.assertEqual(expected, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      # A second generator dataset can re-initialize the same iterator.
      dataset_2 = dataset_ops.Dataset.from_generator(
          g, output_types=dtypes.int64)
      sess.run(iterator.make_initializer(dataset_2))
      for expected in range(10):
        self.assertEqual(expected, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
  def testReinitializableIteratorStaticErrors(self):
    """from_structure/make_initializer reject mismatched types and shapes."""
    # Non-matching structure for types and shapes.
    with self.assertRaises(TypeError):
      iterator = iterator_ops.Iterator.from_structure(
          (dtypes.int64, dtypes.float64), [None])
    # Test validation of dataset argument.
    iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
                                                     dtypes.float64))
    # Incompatible structure.
    with self.assertRaises(ValueError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(((constant_op.constant(
              [1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
                  [4., 5., 6., 7.], dtype=dtypes.float64),))))
    # Incompatible types.
    with self.assertRaises(TypeError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(
              (constant_op.constant([1, 2, 3], dtype=dtypes.int32),
               constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
    # Incompatible shapes.
    iterator = iterator_ops.Iterator.from_structure(
        (dtypes.int64, dtypes.float64), ([None], []))
    with self.assertRaises(TypeError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(
              (constant_op.constant([1, 2, 3], dtype=dtypes.int64),
               constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
  @test_util.deprecated_graph_mode_only
  def testIteratorStringHandle(self):
    """A feedable iterator multiplexes between iterators via string handles."""
    dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
    dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
    iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
    iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
    handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
    feedable_iterator = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
        dataset_ops.get_legacy_output_shapes(dataset_3))
    next_element = feedable_iterator.get_next()
    # Both datasets share the structure declared for the feedable iterator.
    self.assertTrue(dataset_ops.get_structure(dataset_3).is_compatible_with(
        dataset_ops.get_structure(feedable_iterator)))
    self.assertTrue(dataset_ops.get_structure(dataset_4).is_compatible_with(
        dataset_ops.get_structure(feedable_iterator)))
    with self.cached_session() as sess:
      iterator_3_handle = sess.run(iterator_3.string_handle())
      iterator_4_handle = sess.run(iterator_4.string_handle())
      # Alternate between handles; each iterator keeps its own position.
      self.assertEqual(10,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      self.assertEqual(1,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_3_handle}))
      self.assertEqual(20,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      self.assertEqual(2,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_3_handle}))
      self.assertEqual(30,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      self.assertEqual(3,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_3_handle}))
      self.assertEqual(40,
                       sess.run(
                           next_element,
                           feed_dict={handle_placeholder: iterator_4_handle}))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            next_element, feed_dict={handle_placeholder: iterator_3_handle})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            next_element, feed_dict={handle_placeholder: iterator_4_handle})
  @test_util.deprecated_graph_mode_only
  def testIteratorStringHandleFuture(self):
    """testIteratorStringHandle under a future forward-compatibility horizon."""
    with forward_compat.forward_compatibility_horizon(2018, 8, 4):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
      iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
      iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
      handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      feedable_iterator = iterator_ops.Iterator.from_string_handle(
          handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
          dataset_ops.get_legacy_output_shapes(dataset_3))
      next_element = feedable_iterator.get_next()
      self.assertTrue(dataset_ops.get_structure(dataset_3).is_compatible_with(
          dataset_ops.get_structure(feedable_iterator)))
      self.assertTrue(dataset_ops.get_structure(dataset_4).is_compatible_with(
          dataset_ops.get_structure(feedable_iterator)))
      with self.cached_session() as sess:
        iterator_3_handle = sess.run(iterator_3.string_handle())
        iterator_4_handle = sess.run(iterator_4.string_handle())
        # Alternate between handles; each iterator keeps its own position.
        self.assertEqual(
            10,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        self.assertEqual(
            1,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_3_handle}))
        self.assertEqual(
            20,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        self.assertEqual(
            2,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_3_handle}))
        self.assertEqual(
            30,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        self.assertEqual(
            3,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_3_handle}))
        self.assertEqual(
            40,
            sess.run(
                next_element,
                feed_dict={handle_placeholder: iterator_4_handle}))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(
              next_element, feed_dict={handle_placeholder: iterator_3_handle})
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(
              next_element, feed_dict={handle_placeholder: iterator_4_handle})
  @test_util.deprecated_graph_mode_only
  def testIteratorStringHandleReuseTensorObject(self):
    """string_handle() returns a cached tensor unless a name is given."""
    dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
    one_shot_iterator = dataset_ops.make_one_shot_iterator(dataset)
    initializable_iterator = dataset_ops.make_initializable_iterator(dataset)
    structure_iterator = iterator_ops.Iterator.from_structure(
        dataset.output_types)
    created_ops = len(ops.get_default_graph().get_operations())
    # Repeated calls must return the identical tensor object.
    self.assertIs(one_shot_iterator.string_handle(),
                  one_shot_iterator.string_handle())
    self.assertIs(initializable_iterator.string_handle(),
                  initializable_iterator.string_handle())
    self.assertIs(structure_iterator.string_handle(),
                  structure_iterator.string_handle())
    # Assert that getting the (default) string handle creates no ops.
    self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
    # Specifying an explicit name will create a new op.
    handle_with_name = one_shot_iterator.string_handle(name="foo")
    self.assertEqual("foo", handle_with_name.op.name)
    self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
    handle_with_same_name = one_shot_iterator.string_handle(name="foo")
    self.assertEqual("foo_1", handle_with_same_name.op.name)
    self.assertIsNot(handle_with_name, handle_with_same_name)
  @test_util.deprecated_graph_mode_only
  def testIteratorStringHandleError(self):
    """Feeding a handle with mismatched dtype/shape raises InvalidArgument."""
    dataset_int_scalar = (
        dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
    dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
    handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
    feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dtypes.int32, [])
    feedable_int_vector = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dtypes.int32, [None])
    # No shape given: compatible with any int32 iterator.
    feedable_int_any = iterator_ops.Iterator.from_string_handle(
        handle_placeholder, dtypes.int32)
    with self.cached_session() as sess:
      handle_int_scalar = sess.run(dataset_ops.make_one_shot_iterator(
          dataset_int_scalar).string_handle())
      handle_float_vector = sess.run(dataset_ops.make_one_shot_iterator(
          dataset_float_vector).string_handle())
      self.assertEqual(1,
                       sess.run(
                           feedable_int_scalar.get_next(),
                           feed_dict={handle_placeholder: handle_int_scalar}))
      self.assertEqual(2,
                       sess.run(
                           feedable_int_any.get_next(),
                           feed_dict={handle_placeholder: handle_int_scalar}))
      # Scalar handle into a vector-shaped feedable iterator: rejected.
      with self.assertRaises(errors.InvalidArgumentError):
        print(sess.run(
            feedable_int_vector.get_next(),
            feed_dict={handle_placeholder: handle_int_scalar}))
      # Float handle into an int32 feedable iterator: rejected.
      with self.assertRaises(errors.InvalidArgumentError):
        print(sess.run(
            feedable_int_vector.get_next(),
            feed_dict={handle_placeholder: handle_float_vector}))
  @test_util.deprecated_graph_mode_only
  def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
    """An iterator pinned to one device can be driven via remote_call."""
    worker_config = config_pb2.ConfigProto()
    worker_config.device_count["CPU"] = 3
    # The iterator resource lives on cpu:1.
    with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
      iterator_3_handle = iterator_3.string_handle()

    @function.Defun(dtypes.string)
    def _remote_fn(h):
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, dataset_ops.get_legacy_output_types(dataset_3),
          dataset_ops.get_legacy_output_shapes(dataset_3))
      return remote_iterator.get_next()

    with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
      target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      remote_op = functional_ops.remote_call(
          args=[iterator_3_handle],
          Tout=[dtypes.int32],
          f=_remote_fn,
          target=target_placeholder)
    with self.session(config=worker_config) as sess:
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [1])
      # Fails when target is cpu:2 where the resource is not located.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
            })
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [2])
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [3])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
            })
  @test_util.deprecated_graph_mode_only
  def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
    """Reads per-worker iterators from a separate client job via remote_call."""
    s1 = server_lib.Server.create_local_server()
    s2 = server_lib.Server.create_local_server()
    s3 = server_lib.Server.create_local_server()
    # Cluster layout: two "worker" tasks host the data, one "client" task
    # pulls elements from both.
    cluster_def = cluster_pb2.ClusterDef()
    workers = cluster_def.job.add()
    workers.name = "worker"
    workers.tasks[0] = s1.target[len("grpc://"):]
    workers.tasks[1] = s2.target[len("grpc://"):]
    client = cluster_def.job.add()
    client.name = "client"
    client.tasks[0] = s3.target[len("grpc://"):]
    config = config_pb2.ConfigProto(cluster_def=cluster_def)
    worker_devices = [
        "/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
    ]
    # Each worker hosts a one-element dataset containing its own device name,
    # so the value read later identifies which worker it came from.
    itr_handles = []
    for device in worker_devices:
      with ops.device(device):
        src = dataset_ops.Dataset.from_tensor_slices([device])
        itr = dataset_ops.make_one_shot_iterator(src)
        itr_handles.append(itr.string_handle())

    targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
    handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)

    @function.Defun(dtypes.string)
    def loading_func(h):
      # Rebuilds the worker-side iterator from its serialized handle.
      remote_itr = iterator_ops.Iterator.from_string_handle(
          h, dataset_ops.get_legacy_output_types(itr),
          dataset_ops.get_legacy_output_shapes(itr))
      return remote_itr.get_next()

    def map_fn(target, handle):
      return functional_ops.remote_call(
          args=[handle], Tout=[dtypes.string], f=loading_func, target=target)

    with ops.device("/job:client"):
      client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
      itr = dataset_ops.make_initializable_iterator(client_dataset)
      n = itr.get_next()

    with session.Session(s3.target, config=config) as sess:
      sess.run(itr.initializer)
      # Each element should equal the device name of its source worker.
      expected_values = worker_devices
      for expected in expected_values:
        self.assertEqual((compat.as_bytes(expected),), sess.run(n))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(n)
  @test_util.deprecated_graph_mode_only
  def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
    """Ships a string handle through a GPU device by encoding it as uint8."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
      iterator_3_handle = iterator_3.string_handle()

    def _encode_raw(byte_array):
      # Reassembles the uint8 elements back into a Python bytes object.
      return bytes(bytearray(byte_array))

    @function.Defun(dtypes.uint8)
    def _remote_fn(h):
      # Decodes the uint8 tensor back into the iterator's string handle.
      handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          handle, dataset_ops.get_legacy_output_types(dataset_3),
          dataset_ops.get_legacy_output_shapes(dataset_3))
      return remote_iterator.get_next()

    with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
      target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      # String tensors are not supported on GPU, so the handle travels
      # through the GPU device as raw uint8 bytes.
      iterator_3_handle_uint8 = parsing_ops.decode_raw(
          input_bytes=iterator_3_handle, out_type=dtypes.uint8)
      remote_op = functional_ops.remote_call(
          args=[iterator_3_handle_uint8],
          Tout=[dtypes.int32],
          f=_remote_fn,
          target=target_placeholder)

    with self.cached_session() as sess:
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
          })
      self.assertEqual(elem, [1])
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
          })
      self.assertEqual(elem, [2])
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
          })
      self.assertEqual(elem, [3])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
            })
  @test_util.deprecated_graph_mode_only
  def testIncorrectIteratorRestore(self):
    """Restoring saved state into an incompatible iterator must fail."""

    def _path():
      return os.path.join(self.get_temp_dir(), "iterator")

    def _save_op(iterator_resource):
      # Serializes the iterator state and writes it to a temp file.
      iterator_state_variant = gen_dataset_ops.serialize_iterator(
          iterator_resource)
      save_op = io_ops.write_file(
          _path(), parsing_ops.serialize_tensor(iterator_state_variant))
      return save_op

    def _restore_op(iterator_resource):
      # Reads the serialized state back and installs it into the iterator.
      iterator_state_variant = parsing_ops.parse_tensor(
          io_ops.read_file(_path()), dtypes.variant)
      restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
                                                        iterator_state_variant)
      return restore_op

    def _build_range_dataset_graph():
      start = 1
      stop = 10
      iterator = dataset_ops.make_initializable_iterator(
          dataset_ops.Dataset.range(start, stop))
      init_op = iterator.initializer
      get_next = iterator.get_next()
      save_op = _save_op(iterator._iterator_resource)
      restore_op = _restore_op(iterator._iterator_resource)
      return init_op, get_next, save_op, restore_op

    def _build_reader_dataset_graph():
      filenames = ["test"]  # Does not exist but we don't care in this test.
      iterator = dataset_ops.make_initializable_iterator(
          readers.FixedLengthRecordDataset(filenames, 1, 0, 0))
      init_op = iterator.initializer
      get_next_op = iterator.get_next()
      save_op = _save_op(iterator._iterator_resource)
      restore_op = _restore_op(iterator._iterator_resource)
      return init_op, get_next_op, save_op, restore_op

    # Saving iterator for RangeDataset graph.
    with ops.Graph().as_default() as g:
      init_op, _, save_op, _ = _build_range_dataset_graph()
      with self.session(graph=g) as sess:
        sess.run(init_op)
        sess.run(save_op)

    # Attempt to restore the saved iterator into an IteratorResource of
    # incompatible type. An iterator of RangeDataset has output type int64,
    # while an iterator of FixedLengthRecordDataset has output type string.
    # So an InvalidArgumentError should be raised by
    # IteratorResource::set_iterator.
    with ops.Graph().as_default() as g:
      _, _, _, restore_op = _build_reader_dataset_graph()
      with self.session(graph=g) as sess:
        with self.assertRaises(errors.InvalidArgumentError):
          sess.run(restore_op)
@test_util.deprecated_graph_mode_only
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.make_one_shot_iterator(dataset_ops.Dataset.range(10))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertIn(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      ("Tensor", lambda: constant_op.constant(37.0),
       structure.TensorStructure(dtypes.float32, []),
       ops.Tensor, dtypes.float32, []),
      ("SparseTensor", lambda: sparse_tensor.SparseTensor(
          indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32),
          dense_shape=[1]),
       structure.SparseTensorStructure(dtypes.int32, [1]),
       sparse_tensor.SparseTensor, dtypes.int32, [1]),
      ("Nest", lambda: {
          "a": constant_op.constant(37.0),
          "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
       structure.NestedStructure({
           "a": structure.TensorStructure(dtypes.float32, []),
           "b": (structure.TensorStructure(dtypes.string, [1]),
                 structure.TensorStructure(dtypes.string, []))}),
       {"a": ops.Tensor, "b": (ops.Tensor, ops.Tensor)},
       {"a": dtypes.float32, "b": (dtypes.string, dtypes.string)},
       {"a": [], "b": ([1], [])}),
  )
  def testIteratorStructure(self, tf_value_fn, expected_element_structure,
                            expected_output_classes, expected_output_types,
                            expected_output_shapes):
    """Iterator structure and legacy properties must match the element type."""
    tf_value = tf_value_fn()
    iterator = dataset_ops.make_one_shot_iterator(
        dataset_ops.Dataset.from_tensors(tf_value))
    # Structure compatibility is checked in both directions.
    self.assertTrue(expected_element_structure.is_compatible_with(
        iterator._element_structure))
    self.assertTrue(iterator._element_structure.is_compatible_with(
        expected_element_structure))
    # Legacy accessors must agree with the expected per-component metadata.
    self.assertEqual(expected_output_classes,
                     dataset_ops.get_legacy_output_classes(iterator))
    self.assertEqual(expected_output_types,
                     dataset_ops.get_legacy_output_types(iterator))
    self.assertEqual(expected_output_shapes,
                     dataset_ops.get_legacy_output_shapes(iterator))
def testIteratorGetNextName(self):
with ops.Graph().as_default():
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(37.0))
next_element = iterator.get_next(name="overridden_name")
self.assertEqual("overridden_name", next_element.op.name)
@parameterized.named_parameters(
("Async", context.ASYNC),
("Sync", context.SYNC),
)
def testIteratorEagerIteration(self, execution_mode):
with context.eager_mode(), context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for foo in iterator:
self.assertEqual(val, foo.numpy())
val += 1
@test_util.run_v2_only
def testIteratorV2Function(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for _ in range(10):
queue.enqueue(next(iterator))
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
  @test_util.run_v2_only
  def testIteratorV2FunctionError(self):
    # In this test we verify that a function that raises an error ends up
    # properly deallocating the iterator resource.
    queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
    queue.enqueue(0)

    def init_fn(n):
      return n

    def next_fn(_):
      # Iterating an empty dataset raises OutOfRangeError immediately.
      ds = dataset_ops.Dataset.range(0)
      return next(iter(ds))

    def finalize_fn(n):
      # Runs when the iterator resource is released; observable through the
      # queue size below.
      queue.enqueue(0)
      return n

    @def_function.function
    def fn():
      dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn)
      iterator = iter(dataset)
      next(iterator)

    with self.assertRaises(errors.OutOfRangeError):
      fn()
    # Initial enqueue plus the finalize_fn enqueue: the resource was freed.
    self.assertEqual(queue.size().numpy(), 2)
# Standard TensorFlow test-module entry point.
if __name__ == "__main__":
  test.main()
|
|
# pylint: skip-file
# flake8: noqa
class RegistryException(Exception):
    '''Raised when a registry operation (e.g. preparation) cannot complete.'''
class RegistryConfig(OpenShiftCLIConfig):
    ''' RegistryConfig is a DTO for the registry.

        Thin wrapper that forwards the registry name, namespace, kubeconfig
        path and option dict to the OpenShiftCLIConfig base class.
    '''
    def __init__(self, rname, namespace, kubeconfig, registry_options):
        # registry_options appears to be a dict of
        # {name: {'value': ..., 'include': ...}} entries — TODO confirm
        # against the OpenShiftCLIConfig base class (defined elsewhere).
        super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
    ''' Class to wrap the oc command line tools

        Manages the pair of objects that make up a registry:
        - dc/docker-registry
        - svc/docker-registry
    '''
    volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
    volume_path = 'spec.template.spec.volumes'
    env_path = 'spec.template.spec.containers[0].env'

    def __init__(self,
                 registry_config,
                 verbose=False):
        ''' Constructor for Registry

           a registry consists of 3 or more parts
           - dc/docker-registry
           - svc/docker-registry

           Parameters:
           :registry_config: RegistryConfig DTO describing the registry
           :verbose: run the underlying oc commands verbosely
        '''
        super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
        self.version = OCVersion(registry_config.kubeconfig, verbose)
        self.svc_ip = None
        self.portal_ip = None
        self.config = registry_config
        self.verbose = verbose
        self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
                               {'kind': 'svc', 'name': self.config.name},
                              ]

        self.__prepared_registry = None
        self.volume_mounts = []
        self.volumes = []
        # Translate incoming volume_mount option dicts into volume /
        # volumeMount structures consumed by the deployment config.
        if self.config.config_options['volume_mounts']['value']:
            for volume in self.config.config_options['volume_mounts']['value']:
                volume_info = {'secret_name': volume.get('secret_name', None),
                               'name': volume.get('name', None),
                               'type': volume.get('type', None),
                               'path': volume.get('path', None),
                               'claimName': volume.get('claim_name', None),
                               'claimSize': volume.get('claim_size', None),
                              }

                vol, vol_mount = Volume.create_volume_structure(volume_info)
                self.volumes.append(vol)
                self.volume_mounts.append(vol_mount)

        self.dconfig = None
        self.svc = None

    @property
    def deploymentconfig(self):
        ''' deploymentconfig property '''
        return self.dconfig

    @deploymentconfig.setter
    def deploymentconfig(self, config):
        ''' setter for deploymentconfig property '''
        self.dconfig = config

    @property
    def service(self):
        ''' service property '''
        return self.svc

    @service.setter
    def service(self, config):
        ''' setter for service property '''
        self.svc = config

    @property
    def prepared_registry(self):
        ''' prepared_registry property; lazily built on first access '''
        if not self.__prepared_registry:
            results = self.prepare_registry()
            if not results:
                raise RegistryException('Could not perform registry preparation.')
            self.__prepared_registry = results

        return self.__prepared_registry

    @prepared_registry.setter
    def prepared_registry(self, data):
        ''' setter method for prepared_registry attribute '''
        self.__prepared_registry = data

    def get(self):
        ''' return the self.registry_parts '''
        self.deploymentconfig = None
        self.service = None

        rval = 0
        for part in self.registry_parts:
            result = self._get(part['kind'], rname=part['name'])
            if result['returncode'] == 0 and part['kind'] == 'dc':
                self.deploymentconfig = DeploymentConfig(result['results'][0])
            elif result['returncode'] == 0 and part['kind'] == 'svc':
                self.service = Yedit(content=result['results'][0])

            # Remember the last non-zero return code so callers see failure.
            if result['returncode'] != 0:
                rval = result['returncode']

        return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}

    def exists(self):
        '''does the object exist?'''
        self.get()
        if self.deploymentconfig or self.service:
            return True

        return False

    def delete(self, complete=True):
        '''return all pods '''
        parts = []
        for part in self.registry_parts:
            # When complete=False the service is intentionally preserved.
            if not complete and part['kind'] == 'svc':
                continue
            parts.append(self._delete(part['kind'], part['name']))

        # Clean up returned results
        rval = 0
        for part in parts:
            # pylint: disable=invalid-sequence-index
            if 'returncode' in part and part['returncode'] != 0:
                rval = part['returncode']

        return {'returncode': rval, 'results': parts}

    def prepare_registry(self):
        ''' prepare a registry for instantiation '''
        options = self.config.to_option_list()

        cmd = ['registry', '-n', self.config.namespace]
        cmd.extend(options)
        cmd.extend(['--dry-run=True', '-o', 'json'])

        results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
        # probably need to parse this
        # pylint thinks results is a string
        # pylint: disable=no-member
        # NOTE: use the `in` operator instead of dict.has_key(), which was
        # removed in Python 3.
        if results['returncode'] != 0 and 'items' in results['results']:
            return results

        service = None
        deploymentconfig = None
        # pylint: disable=invalid-sequence-index
        for res in results['results']['items']:
            if res['kind'] == 'DeploymentConfig':
                deploymentconfig = DeploymentConfig(res)
            elif res['kind'] == 'Service':
                service = Service(res)

        # Verify we got a service and a deploymentconfig
        if not service or not deploymentconfig:
            return results

        # results will need to get parsed here and modifications added
        deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))

        # modify service ip
        if self.svc_ip:
            service.put('spec.clusterIP', self.svc_ip)
        if self.portal_ip:
            service.put('spec.portalIP', self.portal_ip)

        # need to create the service and the deploymentconfig
        service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
        deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)

        return {"service": service,
                "service_file": service_file,
                "service_update": False,
                "deployment": deploymentconfig,
                "deployment_file": deployment_file,
                "deployment_update": False}

    def create(self):
        '''Create a registry'''
        results = []
        for config_file in ['deployment_file', 'service_file']:
            results.append(self._create(self.prepared_registry[config_file]))

        # Clean up returned results
        rval = 0
        for result in results:
            # pylint: disable=invalid-sequence-index
            if 'returncode' in result and result['returncode'] != 0:
                rval = result['returncode']

        return {'returncode': rval, 'results': results}

    def update(self):
        '''run update for the registry. This performs a delete and then create '''
        # Store the current service IP
        if self.service:
            svcip = self.service.get('spec.clusterIP')
            if svcip:
                self.svc_ip = svcip
            portip = self.service.get('spec.portalIP')
            if portip:
                self.portal_ip = portip

        results = []
        if self.prepared_registry['deployment_update']:
            results.append(self._replace(self.prepared_registry['deployment_file']))
        if self.prepared_registry['service_update']:
            results.append(self._replace(self.prepared_registry['service_file']))

        # Clean up returned results
        rval = 0
        for result in results:
            if result['returncode'] != 0:
                rval = result['returncode']

        return {'returncode': rval, 'results': results}

    def add_modifications(self, deploymentconfig):
        ''' update a deployment config with changes

            Returns the modified yaml dict, or None when at least one edit
            was requested but none of the edits succeeded.
        '''
        # Currently we know that our deployment of a registry requires a few extra modifications
        # Modification 1
        # we need specific environment variables to be set
        for key, value in self.config.config_options['env_vars']['value'].items():
            if not deploymentconfig.exists_env_key(key):
                deploymentconfig.add_env_value(key, value)
            else:
                deploymentconfig.update_env_var(key, value)

        # Modification 2
        # we need specific volume variables to be set
        for volume in self.volumes:
            deploymentconfig.update_volume(volume)

        for vol_mount in self.volume_mounts:
            deploymentconfig.update_volume_mount(vol_mount)

        # Modification 3
        # Edits
        edit_results = []
        for edit in self.config.config_options['edits'].get('value', []):
            if edit['action'] == 'put':
                edit_results.append(deploymentconfig.put(edit['key'],
                                                         edit['value']))
            if edit['action'] == 'update':
                edit_results.append(deploymentconfig.update(edit['key'],
                                                            edit['value'],
                                                            edit.get('index', None),
                                                            edit.get('curr_value', None)))
            if edit['action'] == 'append':
                edit_results.append(deploymentconfig.append(edit['key'],
                                                            edit['value']))

        if edit_results and not any([res[0] for res in edit_results]):
            return None

        return deploymentconfig.yaml_dict

    def needs_update(self):
        ''' check to see if we need to update '''
        if not self.service or not self.deploymentconfig:
            return True

        exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
        if not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
                                     self.service.yaml_dict,
                                     exclude_list,
                                     debug=self.verbose):
            self.prepared_registry['service_update'] = True

        exclude_list = ['dnsPolicy',
                        'terminationGracePeriodSeconds',
                        'restartPolicy', 'timeoutSeconds',
                        'livenessProbe', 'readinessProbe',
                        'terminationMessagePath',
                        'securityContext',
                        'imagePullPolicy',
                        'protocol',  # ports.portocol: TCP
                        'type',  # strategy: {'type': 'rolling'}
                        'defaultMode',  # added on secrets
                        'activeDeadlineSeconds',  # added in 1.5 for timeouts
                       ]

        if not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
                                     self.deploymentconfig.yaml_dict,
                                     exclude_list,
                                     debug=self.verbose):
            self.prepared_registry['deployment_update'] = True

        return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False

    # In the future, we would like to break out each ansible state into a function.
    # pylint: disable=too-many-branches,too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode):
        '''run idempotent ansible code'''
        rconfig = RegistryConfig(params['name'],
                                 params['namespace'],
                                 params['kubeconfig'],
                                 {'images': {'value': params['images'], 'include': True},
                                  'latest_images': {'value': params['latest_images'], 'include': True},
                                  'labels': {'value': params['labels'], 'include': True},
                                  'ports': {'value': ','.join(params['ports']), 'include': True},
                                  'replicas': {'value': params['replicas'], 'include': True},
                                  'selector': {'value': params['selector'], 'include': True},
                                  'service_account': {'value': params['service_account'], 'include': True},
                                  'mount_host': {'value': params['mount_host'], 'include': True},
                                  'env_vars': {'value': params['env_vars'], 'include': False},
                                  'volume_mounts': {'value': params['volume_mounts'], 'include': False},
                                  'edits': {'value': params['edits'], 'include': False},
                                  'enforce_quota': {'value': params['enforce_quota'], 'include': True},
                                  'daemonset': {'value': params['daemonset'], 'include': True},
                                  'tls_key': {'value': params['tls_key'], 'include': True},
                                  'tls_certificate': {'value': params['tls_certificate'], 'include': True},
                                 })

        ocregistry = Registry(rconfig, params['debug'])

        api_rval = ocregistry.get()

        state = params['state']
        ########
        # get
        ########
        if state == 'list':
            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': False, 'results': api_rval, 'state': state}

        ########
        # Delete
        ########
        if state == 'absent':
            if not ocregistry.exists():
                return {'changed': False, 'state': state}

            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}

            # Unsure as to why this is angry with the return type.
            # pylint: disable=redefined-variable-type
            api_rval = ocregistry.delete()

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': True, 'results': api_rval, 'state': state}

        if state == 'present':
            ########
            # Create
            ########
            if not ocregistry.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}

                api_rval = ocregistry.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            if not params['force'] and not ocregistry.needs_update():
                return {'changed': False, 'state': state}

            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}

            api_rval = ocregistry.update()

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': True, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow composable models used as building blocks for estimators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.util.deprecation import deprecated
class _ComposableModel(object):
  """ABC for building blocks that can be used to create estimators.

  Subclasses need to implement the following methods:
    - build_model
    - _get_optimizer
  See below for the required signatures.
  _ComposableModel and its subclasses are not part of the public tf.learn API.
  """

  @deprecated(None, "Please use model_fns in tf.estimator.")
  def __init__(self,
               num_label_columns,
               optimizer,
               gradient_clip_norm,
               num_ps_replicas,
               scope,
               trainable=True):
    """Common initialization for all _ComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Scope for variables created in this model.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    self._num_label_columns = num_label_columns
    self._optimizer = optimizer
    self._gradient_clip_norm = gradient_clip_norm
    self._num_ps_replicas = num_ps_replicas
    self._scope = scope
    self._trainable = trainable
    # Populated by subclasses in build_model(); drives _get_vars().
    self._feature_columns = None

  def get_scope_name(self):
    """Returns the scope name used by this model for variables."""
    return self._scope

  def build_model(self, features, feature_columns, is_training):
    """Builds the model that can calculate the logits.

    Args:
      features: A mapping from feature columns to tensors.
      feature_columns: An iterable containing all the feature columns used
        by the model. All items in the set should be instances of
        classes derived from `FeatureColumn`.
      is_training: Set to True when training, False otherwise.

    Returns:
      The logits for this model.
    """
    raise NotImplementedError

  def get_train_step(self, loss):
    """Returns the ops to run to perform a training step on this estimator.

    Args:
      loss: The loss to use when calculating gradients.

    Returns:
      The ops to run to perform a training step.
    """
    my_vars = self._get_vars()
    # Nothing to train when the model created no variables.
    if not (self._get_feature_columns() or my_vars):
      return []

    grads = gradients.gradients(loss, my_vars)
    if self._gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
    return [self._get_optimizer().apply_gradients(zip(grads, my_vars))]

  def _get_feature_columns(self):
    """Returns validated feature columns sorted by key, or None if unset."""
    if not self._feature_columns:
      return None
    feature_column_ops.check_feature_columns(self._feature_columns)
    # Sort by column key for a deterministic ordering.
    return sorted(set(self._feature_columns), key=lambda x: x.key)

  def _get_vars(self):
    # Subclasses register their variables in a graph collection named after
    # the scope (see weight_collections in build_model implementations).
    if self._get_feature_columns():
      return ops.get_collection(self._scope)
    return []

  def _get_optimizer(self):
    # Resolve the optimizer: default (None or string name), factory
    # callable, or an already-constructed optimizer instance.
    if (self._optimizer is None or isinstance(self._optimizer,
                                              six.string_types)):
      optimizer = self._get_default_optimizer(self._optimizer)
    elif callable(self._optimizer):
      optimizer = self._optimizer()
    else:
      optimizer = self._optimizer
    return optimizer

  def _get_default_optimizer(self, optimizer_name=None):
    """Returns the default optimizer; must be implemented by subclasses."""
    raise NotImplementedError
class LinearComposableModel(_ComposableModel):
  """A _ComposableModel that implements linear regression.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Instances of this class can be used to build estimators through the use
  of composition.
  """

  def __init__(self,
               num_label_columns,
               optimizer=None,
               _joint_weights=False,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes LinearComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      _joint_weights: If True use a single (possibly partitioned) variable
        to store all weights in this model. Faster, but requires that all
        feature columns are sparse and have the 'sum' combiner.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If scope
        is not supplied, it will default to 'linear'.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "linear" if not scope else scope
    super(LinearComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._joint_weights = _joint_weights

  def get_weights(self, model_dir):
    """Returns weights per feature of the linear part.

    Args:
      model_dir: Directory where model parameters, graph and etc. are saved.

    Returns:
      The weights created by this model (without the optimizer weights).
    """
    all_variables = [name for name, _ in list_variables(model_dir)]
    values = {}
    # Matches optimizer slot variables (e.g. ".../Ftrl" or ".../Ftrl_1") so
    # they can be excluded from the returned weights.
    optimizer_regex = r".*/" + self._get_optimizer().get_name() + r"(_\d)?$"
    for name in all_variables:
      if (name.startswith(self._scope + "/") and
          name != self._scope + "/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = load_variable(model_dir, name)
    if len(values) == 1:
      # Single feature column: return its weight tensor directly.
      return values[list(values.keys())[0]]
    return values

  def get_bias(self, model_dir):
    """Returns bias of the model.

    Args:
      model_dir: Directory where model parameters, graph and etc. are saved.

    Returns:
      The bias weights created by this model.
    """
    return load_variable(model_dir, name=(self._scope + "/bias_weight"))

  def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns
    partitioner = partitioned_variables.min_max_variable_partitioner(
        max_partitions=self._num_ps_replicas, min_slice_size=64 << 20)
    with variable_scope.variable_scope(
        self._scope, values=features.values(),
        partitioner=partitioner) as scope:
      if self._joint_weights:
        # One (possibly partitioned) variable for all weights; requires
        # sparse columns with the 'sum' combiner (see __init__ docstring).
        logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=self._get_feature_columns(),
            num_outputs=self._num_label_columns,
            weight_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
      else:
        logits, _, _ = layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=self._get_feature_columns(),
            num_outputs=self._num_label_columns,
            weight_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
    return logits

  def _get_default_optimizer(self, optimizer_name=None):
    """Returns an FTRL optimizer whose learning rate scales with column count."""
    if optimizer_name is None:
      optimizer_name = "Ftrl"
    default_learning_rate = 1. / math.sqrt(len(self._get_feature_columns()))
    # Cap the default learning rate at 0.2.
    default_learning_rate = min(0.2, default_learning_rate)
    return layers.OPTIMIZER_CLS_NAMES[optimizer_name](
        learning_rate=default_learning_rate)
class DNNComposableModel(_ComposableModel):
"""A _ComposableModel that implements a DNN.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Instances of this class can be used to build estimators through the use
of composition.
"""
  def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If not scope
        is supplied, one is generated.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    # Default the variable scope name to "dnn" when none is supplied.
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    # DNN-specific hyperparameters.
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
def get_weights(self, model_dir):
"""Returns the weights of the model.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The weights created by this model.
"""
return [
load_variable(
model_dir, name=(self._scope + "/hiddenlayer_%d/weights" % i))
for i, _ in enumerate(self._hidden_units)
] + [load_variable(
model_dir, name=(self._scope + "/logits/weights"))]
def get_bias(self, model_dir):
"""Returns the bias of the model.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The bias weights created by this model.
"""
return [
load_variable(
model_dir, name=(self._scope + "/hiddenlayer_%d/biases" % i))
for i, _ in enumerate(self._hidden_units)
] + [load_variable(
model_dir, name=(self._scope + "/logits/biases"))]
def _add_hidden_layer_summary(self, value, tag):
# TODO(zakaria): Move this code to tf.learn and add test.
summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s/activation" % tag, value)
def build_model(self, features, feature_columns, is_training):
"""See base class."""
self._feature_columns = feature_columns
input_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas, min_slice_size=64 << 20))
with variable_scope.variable_scope(
self._scope + "/input_from_feature_columns",
values=features.values(),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
features,
self._get_feature_columns(),
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas))
for layer_id, num_hidden_units in enumerate(self._hidden_units):
with variable_scope.variable_scope(
self._scope + "/hiddenlayer_%d" % layer_id,
values=[net],
partitioner=hidden_layer_partitioner) as scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=self._activation_fn,
variables_collections=[self._scope],
trainable=self._trainable,
scope=scope)
if self._dropout is not None and is_training:
net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
self._add_hidden_layer_summary(net, scope.name)
with variable_scope.variable_scope(
self._scope + "/logits",
values=[net],
partitioner=hidden_layer_partitioner) as scope:
logits = layers.fully_connected(
net,
self._num_label_columns,
activation_fn=None,
variables_collections=[self._scope],
trainable=self._trainable,
scope=scope)
self._add_hidden_layer_summary(logits, "logits")
return logits
def _get_default_optimizer(self, optimizer_name=None):
if optimizer_name is None:
optimizer_name = "Adagrad"
return layers.OPTIMIZER_CLS_NAMES[optimizer_name](learning_rate=0.05)
|
|
#!/usr/bin/env python
# Copyright 2017 Ericsson AB.
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import time
import getopt
import sys
import random
def generateGenericMeta(type, t, v):
    """Build the Eiffel "meta" section shared by every generated event.

    NOTE(review): the first parameter shadows the ``type`` builtin; it is
    kept as-is so keyword call sites stay compatible.
    """
    meta = {"type": type, "id": str(uuid.uuid4())}
    meta["time"] = t
    meta["source"] = {"domainId": "example.domain"}
    meta["version"] = v
    return meta
def link(source, target, type):
    """Append a link of the given type from *source* to *target*'s meta id.

    Silently does nothing when *target* is falsy (the linked event may never
    have been generated in this randomized pipeline).
    """
    if target:
        source["links"].append(
            {"type": type, "target": target["meta"]["id"]}
        )
def generateGenericMessage(type, t, v, name, iteration):
    """Build a skeleton Eiffel event: meta + customData (name, iteration) + empty links."""
    return {
        "meta": generateGenericMeta(type, t, v),
        "data": {
            "customData": [
                {"key": "name", "value": name},
                {"key": "iteration", "value": iteration},
            ]
        },
        "links": [],
    }
def findLatestPrevious(iterationsMap, currentIteration, name):
    """Return the most recent event named *name* from any iteration strictly
    before *currentIteration*, or None when no such event exists."""
    candidates = (
        iterationsMap[it][name]
        for it in range(currentIteration - 1, -1, -1)
        if it in iterationsMap and name in iterationsMap[it]
    )
    return next(candidates, None)
def randomizeVerdict(chanceOfSuccess):
    """Return "PASSED" with probability *chanceOfSuccess*, otherwise "FAILED"."""
    return "PASSED" if random.random() < chanceOfSuccess else "FAILED"
def getOutcomeValuesFromVerdicts(testCaseFinishedEventsArray, positiveName, negativeName):
    """Return *positiveName* when every event's verdict is "PASSED", else *negativeName*.

    An empty event list counts as all-passed.
    """
    all_passed = all(
        event["data"]["outcome"]["verdict"] == "PASSED"
        for event in testCaseFinishedEventsArray
    )
    return positiveName if all_passed else negativeName
def generateSCC1(iterationsMap, iteration, t):
    """SCC1: source change created on a topic branch, based on the latest SCS1."""
    msg = generateGenericMessage("EiffelSourceChangeCreatedEvent", t, "1.0.0", "SCC1", iteration)
    link(msg, findLatestPrevious(iterationsMap, iteration, "SCS1"), "BASE")
    msg["data"]["gitIdentifier"] = {"commitId": "fd090b60a4aedc5161da9c035a49b14a319829c5", "branch": "topic-branch-" + str(iteration), "repoName": "myRepo", "repoUri": "https://repo.com"}
    msg["data"]["author"] = {"name": "John Doe", "email": "john.doe@company.com", "id": "johnxxx", "group": "Team Gophers"}
    # Insertions/deletions are randomized so each iteration looks distinct.
    msg["data"]["change"] = {"files": "https://filelist.com/" + str(iteration), "insertions": random.randint(10, 500), "deletions": random.randint(10, 500)}
    return msg

def generateSCS1(iterationsMap, iteration, t):
    """SCS1: the change submitted to master; links this iteration's SCC1 when present."""
    msg = generateGenericMessage("EiffelSourceChangeSubmittedEvent", t, "1.0.0", "SCS1", iteration)
    link(msg, findLatestPrevious(iterationsMap, iteration, "SCS1"), "PREVIOUS_VERSION")
    # Iteration 0 seeds SCS1 without a preceding SCC1, hence the guard.
    if "SCC1" in iterationsMap[iteration]:
        link(msg, iterationsMap[iteration]["SCC1"], "CHANGE")
    msg["data"]["gitIdentifier"] = {"commitId": "fd090b60a4aedc5161da9c035a49b14a319829b4", "branch": "master", "repoName": "myRepo", "repoUri": "https://repo.com"}
    msg["data"]["submitter"] = {"name": "John Doe", "email": "john.doe@company.com", "id": "johnxxx", "group": "Team Gophers"}
    return msg

def generateEDef1(iterationsMap, iteration, t):
    """EDef1: environment definition "Environment 1", versioned by iteration."""
    msg = generateGenericMessage("EiffelEnvironmentDefinedEvent", t, "1.0.0", "EDef1", iteration)
    msg["data"]["name"] = "Environment 1"
    msg["data"]["version"] = str(iteration)
    return msg

def generateEDef2(iterationsMap, iteration, t):
    """EDef2: environment definition "Environment 2", versioned by iteration."""
    msg = generateGenericMessage("EiffelEnvironmentDefinedEvent", t, "1.0.0", "EDef2", iteration)
    msg["data"]["name"] = "Environment 2"
    msg["data"]["version"] = str(iteration)
    return msg

def generateArtC3(iterationsMap, iteration, t):
    """ArtC3: a third-party library artifact (generated only once, in iteration 0)."""
    msg = generateGenericMessage("EiffelArtifactCreatedEvent", t, "2.0.0", "ArtC3", iteration)
    msg["data"]["identity"] = "pkg:maven/com.othercompany.library/third-party-library@3.2.4"
    return msg

def generateCDef1(iterationsMap, iteration, t):
    """CDef1: top-level composition of the sub-system artifact and the third-party library."""
    msg = generateGenericMessage("EiffelCompositionDefinedEvent", t, "1.0.0", "CDef1", iteration)
    msg["data"]["name"] = "Composition 1"
    msg["data"]["version"] = str(iteration)
    link(msg, findLatestPrevious(iterationsMap, iteration, "CDef1"), "PREVIOUS_VERSION")
    link(msg, iterationsMap[iteration]["CLM2"], "CAUSE")
    link(msg, iterationsMap[iteration]["ArtC2"], "ELEMENT")
    # The third-party library only exists in iteration 0.
    link(msg, iterationsMap[0]["ArtC3"], "ELEMENT")
    return msg

def generateCDef2(iterationsMap, iteration, t):
    """CDef2: sub-system composition of the latest available component artifacts."""
    msg = generateGenericMessage("EiffelCompositionDefinedEvent", t, "1.0.0", "CDef2", iteration)
    msg["data"]["name"] = "Composition 2"
    msg["data"]["version"] = str(iteration)
    link(msg, findLatestPrevious(iterationsMap, iteration, "CDef2"), "PREVIOUS_VERSION")
    link(msg, iterationsMap[iteration]["ActT4"], "CONTEXT")
    # iteration + 1 makes the search include artifacts built in THIS iteration,
    # falling back to earlier ones when a component build was randomly skipped.
    link(msg, findLatestPrevious(iterationsMap, iteration + 1, "ArtCC1"), "ELEMENT")
    link(msg, findLatestPrevious(iterationsMap, iteration + 1, "ArtCC2"), "ELEMENT")
    link(msg, findLatestPrevious(iterationsMap, iteration + 1, "ArtCC3"), "ELEMENT")
    return msg

def generateCDef3(iterationsMap, iteration, t):
    """CDef3: component-level composition pointing at the submitted source change."""
    msg = generateGenericMessage("EiffelCompositionDefinedEvent", t, "1.0.0", "CDef3", iteration)
    msg["data"]["name"] = "Composition 3"
    msg["data"]["version"] = str(iteration)
    link(msg, findLatestPrevious(iterationsMap, iteration, "CDef3"), "PREVIOUS_VERSION")
    link(msg, iterationsMap[iteration]["SCS1"], "ELEMENT")
    return msg
def generateArtC1(iterationsMap, iteration, t):
    """ArtC1: the complete-system artifact, built from composition CDef1."""
    msg = generateGenericMessage("EiffelArtifactCreatedEvent", t, "2.0.0", "ArtC1", iteration)
    link(msg, iterationsMap[iteration]["CDef1"], "COMPOSITION")
    # Environments are only defined in iteration 0.
    link(msg, iterationsMap[0]["EDef1"], "ENVIRONMENT")
    link(msg, iterationsMap[iteration]["CDef1"], "CAUSE")
    link(msg, findLatestPrevious(iterationsMap, iteration, "ArtC1"), "PREVIOUS_VERSION")
    msg["data"]["identity"] = "pkg:maven/com.mycompany.myproduct/complete-system@1." + str(iteration) + ".0"
    return msg

def generateArtC2(iterationsMap, iteration, t):
    """ArtC2: the sub-system artifact, built from composition CDef2 in activity Act4."""
    msg = generateGenericMessage("EiffelArtifactCreatedEvent", t, "2.0.0", "ArtC2", iteration)
    link(msg, iterationsMap[iteration]["CDef2"], "COMPOSITION")
    link(msg, iterationsMap[0]["EDef2"], "ENVIRONMENT")
    link(msg, findLatestPrevious(iterationsMap, iteration, "ArtC2"), "PREVIOUS_VERSION")
    link(msg, iterationsMap[iteration]["ActT4"], "CONTEXT")
    msg["data"]["identity"] = "pkg:maven/com.mycompany.myproduct/sub-system@1." + str(iteration) + ".0"
    return msg

def generateArtCC1(iterationsMap, iteration, t):
    """ArtCC1: component artifact 1, built from composition CDef3."""
    msg = generateGenericMessage("EiffelArtifactCreatedEvent", t, "2.0.0", "ArtCC1", iteration)
    link(msg, iterationsMap[iteration]["CDef3"], "COMPOSITION")
    link(msg, iterationsMap[iteration]["CDef3"], "CAUSE")
    link(msg, findLatestPrevious(iterationsMap, iteration, "ArtCC1"), "PREVIOUS_VERSION")
    msg["data"]["identity"] = "pkg:maven/com.mycompany.myproduct/component-1@1." + str(iteration) + ".0"
    return msg

def generateArtCC2(iterationsMap, iteration, t):
    """ArtCC2: component artifact 2, built from composition CDef3."""
    msg = generateGenericMessage("EiffelArtifactCreatedEvent", t, "2.0.0", "ArtCC2", iteration)
    link(msg, iterationsMap[iteration]["CDef3"], "COMPOSITION")
    link(msg, iterationsMap[iteration]["CDef3"], "CAUSE")
    link(msg, findLatestPrevious(iterationsMap, iteration, "ArtCC2"), "PREVIOUS_VERSION")
    msg["data"]["identity"] = "pkg:maven/com.mycompany.myproduct/component-2@1." + str(iteration) + ".0"
    return msg

def generateArtCC3(iterationsMap, iteration, t):
    """ArtCC3: component artifact 3, built from composition CDef3."""
    msg = generateGenericMessage("EiffelArtifactCreatedEvent", t, "2.0.0", "ArtCC3", iteration)
    link(msg, iterationsMap[iteration]["CDef3"], "COMPOSITION")
    link(msg, iterationsMap[iteration]["CDef3"], "CAUSE")
    link(msg, findLatestPrevious(iterationsMap, iteration, "ArtCC3"), "PREVIOUS_VERSION")
    msg["data"]["identity"] = "pkg:maven/com.mycompany.myproduct/component-3@1." + str(iteration) + ".0"
    return msg

def generateArtP1(iterationsMap, iteration, t):
    """ArtP1: publication of the complete-system artifact ArtC1."""
    msg = generateGenericMessage("EiffelArtifactPublishedEvent", t, "1.0.0", "ArtP1", iteration)
    link(msg, iterationsMap[iteration]["ArtC1"], "ARTIFACT")
    link(msg, iterationsMap[iteration]["ArtC1"], "CAUSE")
    msg["data"]["locations"] = [{"type": "PLAIN", "uri": "https://myrepository.com/myCompleteSystemArtifact"}]
    return msg

def generateArtP2(iterationsMap, iteration, t):
    """ArtP2: publication of the sub-system artifact ArtC2."""
    msg = generateGenericMessage("EiffelArtifactPublishedEvent", t, "1.0.0", "ArtP2", iteration)
    link(msg, iterationsMap[iteration]["ArtC2"], "ARTIFACT")
    link(msg, iterationsMap[iteration]["ActT4"], "CONTEXT")
    msg["data"]["locations"] = [{"type": "PLAIN", "uri": "https://myrepository.com/mySubSystemArtifact"}]
    return msg
def generateActT3(iterationsMap, iteration, t):
    """ActT3: the sub-system test activity is triggered by the published ArtP2."""
    msg = generateGenericMessage("EiffelActivityTriggeredEvent", t, "1.0.0", "ActT3", iteration)
    link(msg, iterationsMap[iteration]["ArtP2"], "CAUSE")
    msg["data"]["name"] = "Act3"
    msg["data"]["categories"] = ["Sub-system Test Activity"]
    msg["data"]["triggers"] = [ {"type": "EIFFEL_EVENT"} ]
    msg["data"]["executionType"] = "AUTOMATED"
    return msg

def generateActS3(iterationsMap, iteration, t):
    """ActS3: start of activity Act3."""
    msg = generateGenericMessage("EiffelActivityStartedEvent", t, "1.0.0", "ActS3", iteration)
    link(msg, iterationsMap[iteration]["ActT3"], "ACTIVITY_EXECUTION")
    return msg

def generateActF3(iterationsMap, iteration, t):
    """ActF3: finish of Act3; conclusion derived from the TSF1 suite verdict."""
    msg = generateGenericMessage("EiffelActivityFinishedEvent", t, "1.0.0", "ActF3", iteration)
    link(msg, iterationsMap[iteration]["ActT3"], "ACTIVITY_EXECUTION")
    msg["data"]["outcome"] = {"conclusion": getOutcomeValuesFromVerdicts([iterationsMap[iteration]["TSF1"]], "SUCCESSFUL", "UNSUCCESSFUL")}
    return msg

def generateActT4(iterationsMap, iteration, t):
    """ActT4: the sub-system build activity, caused by whichever components were built."""
    msg = generateGenericMessage("EiffelActivityTriggeredEvent", t, "1.0.0", "ActT4", iteration)
    # Each component build is randomly skipped, so every CAUSE is optional.
    if "ArtCC1" in iterationsMap[iteration]:
        link(msg, iterationsMap[iteration]["ArtCC1"], "CAUSE")
    if "ArtCC2" in iterationsMap[iteration]:
        link(msg, iterationsMap[iteration]["ArtCC2"], "CAUSE")
    if "ArtCC3" in iterationsMap[iteration]:
        link(msg, iterationsMap[iteration]["ArtCC3"], "CAUSE")
    msg["data"]["name"] = "Act4"
    msg["data"]["categories"] = ["Sub-system Build Activity"]
    msg["data"]["triggers"] = [ {"type": "EIFFEL_EVENT"} ]
    msg["data"]["executionType"] = "AUTOMATED"
    return msg

def generateActS4(iterationsMap, iteration, t):
    """ActS4: start of activity Act4."""
    msg = generateGenericMessage("EiffelActivityStartedEvent", t, "1.0.0", "ActS4", iteration)
    link(msg, iterationsMap[iteration]["ActT4"], "ACTIVITY_EXECUTION")
    return msg

def generateActF4(iterationsMap, iteration, t):
    """ActF4: finish of Act4; successful only when the ArtC2 artifact was produced."""
    msg = generateGenericMessage("EiffelActivityFinishedEvent", t, "1.0.0", "ActF4", iteration)
    link(msg, iterationsMap[iteration]["ActT4"], "ACTIVITY_EXECUTION")
    if "ArtC2" in iterationsMap[iteration]:
        msg["data"]["outcome"] = {"conclusion": "SUCCESSFUL"}
    else:
        msg["data"]["outcome"] = {"conclusion": "UNSUCCESSFUL"}
    return msg

def generateActT1(iterationsMap, iteration, t):
    """ActT1: first system-test activity, triggered by the published ArtP1."""
    msg = generateGenericMessage("EiffelActivityTriggeredEvent", t, "1.0.0", "ActT1", iteration)
    link(msg, iterationsMap[iteration]["ArtP1"], "CAUSE")
    msg["data"]["name"] = "Act1"
    msg["data"]["categories"] = ["Test Activity"]
    msg["data"]["triggers"] = [ {"type": "EIFFEL_EVENT"} ]
    msg["data"]["executionType"] = "AUTOMATED"
    return msg

def generateActS1(iterationsMap, iteration, t):
    """ActS1: start of activity Act1."""
    msg = generateGenericMessage("EiffelActivityStartedEvent", t, "1.0.0", "ActS1", iteration)
    link(msg, iterationsMap[iteration]["ActT1"], "ACTIVITY_EXECUTION")
    return msg

def generateActF1(iterationsMap, iteration, t):
    """ActF1: finish of Act1; conclusion derived from test cases TC1 and TC2."""
    msg = generateGenericMessage("EiffelActivityFinishedEvent", t, "1.0.0", "ActF1", iteration)
    link(msg, iterationsMap[iteration]["ActT1"], "ACTIVITY_EXECUTION")
    msg["data"]["outcome"] = {"conclusion": getOutcomeValuesFromVerdicts([iterationsMap[iteration]["TCF1"], iterationsMap[iteration]["TCF2"]], "SUCCESSFUL", "UNSUCCESSFUL")}
    return msg
def generateTCT1(iterationsMap, iteration, t):
    """TCT1: test case TC1 triggered against ArtC1 within activity Act1."""
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT1", iteration)
    msg["data"]["testCase"] = {"tracker": "My First Test Management System", "id": "TC1", "uri": "http://tm.company.com/browse/TC1"}
    link(msg, iterationsMap[iteration]["ActT1"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC1"], "IUT")
    return msg

def generateTCS1(iterationsMap, iteration, t):
    """TCS1: start of test case TC1."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS1", iteration)
    link(msg, iterationsMap[iteration]["TCT1"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF1(iterationsMap, iteration, t):
    """TCF1: finish of TC1; verdict passes with 95% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF1", iteration)
    link(msg, iterationsMap[iteration]["TCT1"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.95), "conclusion": "SUCCESSFUL"}
    return msg

def generateTCT2(iterationsMap, iteration, t):
    """TCT2: test case TC2 triggered against ArtC1 within activity Act1."""
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT2", iteration)
    msg["data"]["testCase"] = {"tracker": "My First Test Management System", "id": "TC2", "uri": "http://tm.company.com/browse/TC2"}
    link(msg, iterationsMap[iteration]["ActT1"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC1"], "IUT")
    return msg

def generateTCS2(iterationsMap, iteration, t):
    """TCS2: start of test case TC2."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS2", iteration)
    link(msg, iterationsMap[iteration]["TCT2"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF2(iterationsMap, iteration, t):
    """TCF2: finish of TC2; verdict passes with 95% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF2", iteration)
    link(msg, iterationsMap[iteration]["TCT2"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.95), "conclusion": "SUCCESSFUL"}
    return msg

def generateActT2(iterationsMap, iteration, t):
    """ActT2: second system-test activity, triggered by the published ArtP1."""
    msg = generateGenericMessage("EiffelActivityTriggeredEvent", t, "1.0.0", "ActT2", iteration)
    link(msg, iterationsMap[iteration]["ArtP1"], "CAUSE")
    msg["data"]["name"] = "Act2"
    msg["data"]["categories"] = ["Test Activity"]
    msg["data"]["triggers"] = [ {"type": "EIFFEL_EVENT"} ]
    msg["data"]["executionType"] = "AUTOMATED"
    return msg

def generateActS2(iterationsMap, iteration, t):
    """ActS2: start of activity Act2."""
    msg = generateGenericMessage("EiffelActivityStartedEvent", t, "1.0.0", "ActS2", iteration)
    link(msg, iterationsMap[iteration]["ActT2"], "ACTIVITY_EXECUTION")
    return msg

def generateActF2(iterationsMap, iteration, t):
    """ActF2: finish of Act2; conclusion derived from test cases TC3 and TC4."""
    msg = generateGenericMessage("EiffelActivityFinishedEvent", t, "1.0.0", "ActF2", iteration)
    link(msg, iterationsMap[iteration]["ActT2"], "ACTIVITY_EXECUTION")
    msg["data"]["outcome"] = {"conclusion": getOutcomeValuesFromVerdicts([iterationsMap[iteration]["TCF3"], iterationsMap[iteration]["TCF4"]], "SUCCESSFUL", "UNSUCCESSFUL")}
    return msg

def generateTCT3(iterationsMap, iteration, t):
    """TCT3: test case TC3 triggered against ArtC1 within activity Act2."""
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT3", iteration)
    msg["data"]["testCase"] = {"tracker": "My First Test Management System", "id": "TC3", "uri": "http://tm.company.com/browse/TC3"}
    link(msg, iterationsMap[iteration]["ActT2"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC1"], "IUT")
    return msg

def generateTCS3(iterationsMap, iteration, t):
    """TCS3: start of test case TC3."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS3", iteration)
    link(msg, iterationsMap[iteration]["TCT3"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF3(iterationsMap, iteration, t):
    """TCF3: finish of TC3; verdict passes with 99% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF3", iteration)
    link(msg, iterationsMap[iteration]["TCT3"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.99), "conclusion": "SUCCESSFUL"}
    return msg

def generateTCT4(iterationsMap, iteration, t):
    """TCT4: test case TC4 triggered against ArtC1 within activity Act2."""
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT4", iteration)
    msg["data"]["testCase"] = {"tracker": "My First Test Management System", "id": "TC4", "uri": "http://tm.company.com/browse/TC4"}
    link(msg, iterationsMap[iteration]["ActT2"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC1"], "IUT")
    return msg

def generateTCS4(iterationsMap, iteration, t):
    """TCS4: start of test case TC4."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS4", iteration)
    link(msg, iterationsMap[iteration]["TCT4"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF4(iterationsMap, iteration, t):
    """TCF4: finish of TC4; verdict passes with 90% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF4", iteration)
    link(msg, iterationsMap[iteration]["TCT4"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.90), "conclusion": "SUCCESSFUL"}
    return msg
def generateTCT5(iterationsMap, iteration, t):
    """TCT5: test case TC5 triggered against ArtC2 within test suite TSS1."""
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT5", iteration)
    msg["data"]["testCase"] = {"tracker": "My Other Test Management System", "id": "TC5", "uri": "https://other-tm.company.com/testCase/TC5"}
    link(msg, iterationsMap[iteration]["TSS1"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC2"], "IUT")
    return msg

def generateTCS5(iterationsMap, iteration, t):
    """TCS5: start of test case TC5."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS5", iteration)
    link(msg, iterationsMap[iteration]["TCT5"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF5(iterationsMap, iteration, t):
    """TCF5: finish of TC5; verdict passes with 98% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF5", iteration)
    link(msg, iterationsMap[iteration]["TCT5"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.98), "conclusion": "SUCCESSFUL"}
    return msg

def generateTCT6(iterationsMap, iteration, t):
    """TCT6: test case TC6 triggered against ArtC2 within test suite TSS1."""
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT6", iteration)
    msg["data"]["testCase"] = {"tracker": "My Other Test Management System", "id": "TC6", "uri": "https://other-tm.company.com/testCase/TC6"}
    link(msg, iterationsMap[iteration]["TSS1"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC2"], "IUT")
    return msg

def generateTCS6(iterationsMap, iteration, t):
    """TCS6: start of test case TC6."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS6", iteration)
    link(msg, iterationsMap[iteration]["TCT6"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF6(iterationsMap, iteration, t):
    """TCF6: finish of TC6; verdict passes with 98% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF6", iteration)
    link(msg, iterationsMap[iteration]["TCT6"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.98), "conclusion": "SUCCESSFUL"}
    return msg
def generateTCT7(iterationsMap, iteration, t):
    """TCT7: test case TC7 triggered against ArtC2 within test suite TSS1.

    Bug fix: the testCase id/uri previously said "TC6" (copy-paste from
    generateTCT7's neighbour generateTCT6), which made TC6 appear to run
    twice and TC7 never exist in the generated event stream.
    """
    msg = generateGenericMessage("EiffelTestCaseTriggeredEvent", t, "1.0.0", "TCT7", iteration)
    msg["data"]["testCase"] = {"tracker": "My Other Test Management System", "id": "TC7", "uri": "https://other-tm.company.com/testCase/TC7"}
    link(msg, iterationsMap[iteration]["TSS1"], "CONTEXT")
    link(msg, iterationsMap[iteration]["ArtC2"], "IUT")
    return msg
def generateTCS7(iterationsMap, iteration, t):
    """TCS7: start of test case 7."""
    msg = generateGenericMessage("EiffelTestCaseStartedEvent", t, "1.0.0", "TCS7", iteration)
    link(msg, iterationsMap[iteration]["TCT7"], "TEST_CASE_EXECUTION")
    return msg

def generateTCF7(iterationsMap, iteration, t):
    """TCF7: finish of test case 7; verdict passes with 98% probability."""
    msg = generateGenericMessage("EiffelTestCaseFinishedEvent", t, "1.0.0", "TCF7", iteration)
    link(msg, iterationsMap[iteration]["TCT7"], "TEST_CASE_EXECUTION")
    msg["data"]["outcome"] = {"verdict": randomizeVerdict(0.98), "conclusion": "SUCCESSFUL"}
    return msg

def generateTSS1(iterationsMap, iteration, t):
    """TSS1: the functional test suite started within activity Act3."""
    msg = generateGenericMessage("EiffelTestSuiteStartedEvent", t, "1.0.0", "TSS1", iteration)
    link(msg, iterationsMap[iteration]["ActT3"], "CONTEXT")
    msg["data"]["name"] = "My functional test suite"
    msg["data"]["categories"] = ["Pre system integration tests"]
    msg["data"]["types"] = ["FUNCTIONAL"]
    return msg

def generateTSF1(iterationsMap, iteration, t):
    """TSF1: suite finish; verdict aggregated from test cases TC5/TC6/TC7."""
    msg = generateGenericMessage("EiffelTestSuiteFinishedEvent", t, "1.0.0", "TSF1", iteration)
    msg["data"]["outcome"] = {"verdict": getOutcomeValuesFromVerdicts([iterationsMap[iteration]["TCF5"], iterationsMap[iteration]["TCF6"], iterationsMap[iteration]["TCF7"]], "PASSED", "FAILED")}
    link(msg, iterationsMap[iteration]["TSS1"], "TEST_SUITE_EXECUTION")
    return msg

def generateCLM1(iterationsMap, iteration, t):
    """CLM1: "readyForRelease" confidence on ArtC1, from TC1-TC4 results."""
    msg = generateGenericMessage("EiffelConfidenceLevelModifiedEvent", t, "1.0.0", "CLM1", iteration)
    link(msg, iterationsMap[iteration]["ArtC1"], "SUBJECT")
    link(msg, iterationsMap[iteration]["TCF1"], "CAUSE")
    link(msg, iterationsMap[iteration]["TCF2"], "CAUSE")
    link(msg, iterationsMap[iteration]["TCF3"], "CAUSE")
    link(msg, iterationsMap[iteration]["TCF4"], "CAUSE")
    msg["data"]["name"] = "readyForRelease"
    msg["data"]["value"] = getOutcomeValuesFromVerdicts([iterationsMap[iteration]["TCF1"], iterationsMap[iteration]["TCF2"], iterationsMap[iteration]["TCF3"], iterationsMap[iteration]["TCF4"]], "SUCCESS", "FAILURE")
    return msg

def generateCLM2(iterationsMap, iteration, t):
    """CLM2: "readyForSystemIntegration" confidence on ArtC2, from the TSF1 verdict."""
    msg = generateGenericMessage("EiffelConfidenceLevelModifiedEvent", t, "1.0.0", "CLM2", iteration)
    msg["data"]["name"] = "readyForSystemIntegration"
    msg["data"]["value"] = getOutcomeValuesFromVerdicts([iterationsMap[iteration]["TSF1"]], "SUCCESS", "FAILURE")
    link(msg, iterationsMap[iteration]["TSF1"], "CAUSE")
    link(msg, iterationsMap[iteration]["ArtC2"], "SUBJECT")
    return msg
def buildMsgArrayFromiterationsMap(iterationsMap):
    """Flatten the per-iteration event maps into a single list of events.

    Fixes: the original iterated ``.items()`` but never used the key, and
    guarded against empty per-iteration maps even though extending with an
    empty collection is already a no-op. Iterating the values directly is
    equivalent and simpler. Events keep insertion (generation) order.

    NOTE(review): name keeps its lowercase-i inconsistency because main()
    calls it by this exact name.

    Args:
        iterationsMap: dict mapping iteration number -> {event name -> event}.

    Returns:
        list of all event dicts across every iteration.
    """
    globalArray = []
    for iterationMap in iterationsMap.values():
        globalArray.extend(iterationMap.values())
    return globalArray
def buildMsgArrayFromIterationMap(iterationMap):
    """Return one iteration's events as a list, in insertion (generation) order.

    The original appended value-by-value in a loop; ``list(values())`` is the
    idiomatic equivalent.
    """
    return list(iterationMap.values())
def generateIterationZeroMessages(iterationsMap, t):
    """Seed iteration 0 with the baseline events every later iteration links to.

    Iteration 0 always contains: the initial source submission (SCS1), both
    environment definitions, the third-party artifact (ArtC3), the component
    composition (CDef3) and all three component artifacts.

    Returns the advanced timestamp t (milliseconds).
    """
    iterationsMap[0] = {}
    iterationsMap[0]["SCS1"] = generateSCS1(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["EDef1"] = generateEDef1(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["EDef2"] = generateEDef2(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["ArtC3"] = generateArtC3(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["CDef3"] = generateCDef3(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["ArtCC1"] = generateArtCC1(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["ArtCC2"] = generateArtCC2(iterationsMap, 0, t)
    t += 1
    iterationsMap[0]["ArtCC3"] = generateArtCC3(iterationsMap, 0, t)
    return t
def generateComponentBuildEvents(iterationsMap, iteration, t):
    """Generate source-change and component-build events for one iteration.

    ArtCC1 and ArtCC2 are each built with only 50% probability to simulate
    components that did not change; ArtCC3 is always rebuilt. Returns the
    advanced timestamp t.
    """
    t += 1
    iterationsMap[iteration]["SCC1"] = generateSCC1(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["SCS1"] = generateSCS1(iterationsMap, iteration, t)
    t += 100
    iterationsMap[iteration]["CDef3"] = generateCDef3(iterationsMap, iteration, t)
    if random.random() < 0.5:
        t += 200000
        iterationsMap[iteration]["ArtCC1"] = generateArtCC1(iterationsMap, iteration, t)
    if random.random() < 0.5:
        t += 35000
        iterationsMap[iteration]["ArtCC2"] = generateArtCC2(iterationsMap, iteration, t)
    t += 1000
    iterationsMap[iteration]["ArtCC3"] = generateArtCC3(iterationsMap, iteration, t)
    return t
def generateSubSystemBuildEvents(iterationsMap, iteration, t):
    """Generate the sub-system build activity (Act4) events for one iteration.

    The build succeeds (producing and publishing ArtC2) with 95% probability;
    ActF4 is always emitted and derives its conclusion from ArtC2's presence.
    Returns the advanced timestamp t.
    """
    t += 100
    iterationsMap[iteration]["ActT4"] = generateActT4(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["ActS4"] = generateActS4(iterationsMap, iteration, t)
    t += 100
    iterationsMap[iteration]["CDef2"] = generateCDef2(iterationsMap, iteration, t)
    if random.random() < 0.95:
        t += 100
        iterationsMap[iteration]["ArtC2"] = generateArtC2(iterationsMap, iteration, t)
        t += 30000
        iterationsMap[iteration]["ArtP2"] = generateArtP2(iterationsMap, iteration, t)
    t += 50
    iterationsMap[iteration]["ActF4"] = generateActF4(iterationsMap, iteration, t)
    return t
def generateSubSystemTestEvents(iterationsMap, iteration, t):
    """Generate the sub-system test activity (Act3) events for one iteration:
    test suite TSS1 with test cases TC5-TC7, the aggregated TSF1 verdict and
    the resulting CLM2 confidence level. Returns the advanced timestamp t."""
    t += 2000
    iterationsMap[iteration]["ActT3"] = generateActT3(iterationsMap, iteration, t)
    t += 3
    iterationsMap[iteration]["ActS3"] = generateActS3(iterationsMap, iteration, t)
    t += 2000
    iterationsMap[iteration]["TSS1"] = generateTSS1(iterationsMap, iteration, t)
    t += 100
    iterationsMap[iteration]["TCT5"] = generateTCT5(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCT6"] = generateTCT6(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCT7"] = generateTCT7(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCS5"] = generateTCS5(iterationsMap, iteration, t)
    t += 2
    iterationsMap[iteration]["TCS6"] = generateTCS6(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCS7"] = generateTCS7(iterationsMap, iteration, t)
    t += 10000
    iterationsMap[iteration]["TCF5"] = generateTCF5(iterationsMap, iteration, t)
    t += 3000
    iterationsMap[iteration]["TCF6"] = generateTCF6(iterationsMap, iteration, t)
    t += 5000
    iterationsMap[iteration]["TCF7"] = generateTCF7(iterationsMap, iteration, t)
    t += 50
    iterationsMap[iteration]["TSF1"] = generateTSF1(iterationsMap, iteration, t)
    t += 3
    iterationsMap[iteration]["ActF3"] = generateActF3(iterationsMap, iteration, t)
    t += 300
    iterationsMap[iteration]["CLM2"] = generateCLM2(iterationsMap, iteration, t)
    return t
def generateSystemIntegrationEvents(iterationsMap, iteration, t):
    """Generate the system-integration events for one iteration: the complete
    system composition/artifact/publication, the two test activities Act1/Act2
    with test cases TC1-TC4, and the final CLM1 confidence level. Returns the
    advanced timestamp t."""
    t += 300
    iterationsMap[iteration]["CDef1"] = generateCDef1(iterationsMap, iteration, t)
    t += 1000
    iterationsMap[iteration]["ArtC1"] = generateArtC1(iterationsMap, iteration, t)
    t += 1000
    iterationsMap[iteration]["ArtP1"] = generateArtP1(iterationsMap, iteration, t)
    t += 1000
    iterationsMap[iteration]["ActT1"] = generateActT1(iterationsMap, iteration, t)
    t += 2
    iterationsMap[iteration]["ActS1"] = generateActS1(iterationsMap, iteration, t)
    t += 50
    iterationsMap[iteration]["ActT2"] = generateActT2(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCT1"] = generateTCT1(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCT2"] = generateTCT2(iterationsMap, iteration, t)
    t += 1000
    iterationsMap[iteration]["TCS1"] = generateTCS1(iterationsMap, iteration, t)
    t += 100
    iterationsMap[iteration]["TCS2"] = generateTCS2(iterationsMap, iteration, t)
    # TCF2 intentionally finishes before TCF1 (long-running first test case).
    t += 50000
    iterationsMap[iteration]["TCF2"] = generateTCF2(iterationsMap, iteration, t)
    t += 3000
    iterationsMap[iteration]["TCF1"] = generateTCF1(iterationsMap, iteration, t)
    t += 100
    iterationsMap[iteration]["ActF1"] = generateActF1(iterationsMap, iteration, t)
    # Act2 only starts well after Act1 has finished.
    t += 100000
    iterationsMap[iteration]["ActS2"] = generateActS2(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCT3"] = generateTCT3(iterationsMap, iteration, t)
    t += 200
    iterationsMap[iteration]["TCS3"] = generateTCS3(iterationsMap, iteration, t)
    t += 10000
    iterationsMap[iteration]["TCF3"] = generateTCF3(iterationsMap, iteration, t)
    t += 1
    iterationsMap[iteration]["TCT4"] = generateTCT4(iterationsMap, iteration, t)
    t += 100
    iterationsMap[iteration]["TCS4"] = generateTCS4(iterationsMap, iteration, t)
    t += 120000
    iterationsMap[iteration]["TCF4"] = generateTCF4(iterationsMap, iteration, t)
    t += 20
    iterationsMap[iteration]["ActF2"] = generateActF2(iterationsMap, iteration, t)
    t += 2500
    iterationsMap[iteration]["CLM1"] = generateCLM1(iterationsMap, iteration, t)
    return t
def generateIterationMessages(iterationsMap, iteration, t):
    """Generate all events for one iteration of the simulated pipeline.

    Sub-system tests only run when the sub-system build produced ArtC2, and
    system integration only runs when those tests yielded a SUCCESS
    confidence (CLM2) — mirroring a real gated pipeline. Returns the
    advanced timestamp t.
    """
    iterationsMap[iteration] = {}
    t = generateComponentBuildEvents(iterationsMap, iteration, t)
    t = generateSubSystemBuildEvents(iterationsMap, iteration, t)
    if "ArtC2" in iterationsMap[iteration]:
        t = generateSubSystemTestEvents(iterationsMap, iteration, t)
        # CLM2 exists whenever the sub-system tests ran.
        if iterationsMap[iteration]["CLM2"]["data"]["value"] == "SUCCESS":
            t = generateSystemIntegrationEvents(iterationsMap, iteration, t)
    return t
def main(iterations):
    """Generate the requested number of pipeline iterations and print all
    events as a JSON array, sorted chronologically by meta.time."""
    timestamp = int(time.time() * 1000)
    iterations_map = {}
    timestamp = generateIterationZeroMessages(iterations_map, timestamp)
    for current in range(1, iterations + 1):
        # Leave a small gap between iterations.
        timestamp += 10000
        timestamp = generateIterationMessages(iterations_map, current, timestamp)
    messages = buildMsgArrayFromiterationsMap(iterations_map)
    messages.sort(key=lambda message: message["meta"]["time"])
    print(json.dumps(messages, indent=2, separators=(",", ": ")))
def usage():
    """Print command-line usage help to stdout."""
    for help_line in (
        "-h, --help",
        "    Print this text.",
        "-i ..., --iterations=...",
        "    Specify the number of iterations to create.",
        "    Default: 1",
    ):
        print(help_line)
# --- Command-line entry point -----------------------------------------------
# NOTE(review): this runs at import time; wrap it in an
# `if __name__ == "__main__":` guard if the module is ever imported.
try:
    opts, args = getopt.getopt(sys.argv[1:], "hi:", ["help", "iterations="])
except getopt.GetoptError:
    # Unknown option or missing argument: show usage and exit non-zero.
    usage()
    sys.exit(2)
iterations = 1  # default number of iterations when -i is not given
for opt, arg in opts:
    if opt in ("-h", "--help"):
        usage()
        sys.exit()
    elif opt in ("-i", "--iterations"):
        iterations = int(arg)
main(iterations)
|
|
"""Tests for the PS4 media player platform."""
from unittest.mock import MagicMock, patch
from pyps4_2ndscreen.credential import get_ddp_message
from pyps4_2ndscreen.ddp import DEFAULT_UDP_PORT
from pyps4_2ndscreen.media_art import TYPE_APP as PS_TYPE_APP
from homeassistant.components import ps4
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_APP,
MEDIA_TYPE_GAME,
)
from homeassistant.components.ps4.const import (
ATTR_MEDIA_IMAGE_URL,
CONFIG_ENTRY_VERSION as VERSION,
DEFAULT_REGION,
DOMAIN,
GAMES_FILE,
PS4_DATA,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_HOST,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
STATE_IDLE,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, mock_device_registry, mock_registry
# --- Fixture constants for the PS4 tests ---
# Credentials / config-entry data.
MOCK_CREDS = "123412341234abcd12341234abcd12341234abcd12341234abcd12341234abcd"
MOCK_NAME = "ha_ps4_name"
MOCK_REGION = DEFAULT_REGION
MOCK_GAMES_FILE = GAMES_FILE
# Fake device identity as reported by DDP discovery.
MOCK_HOST = "192.168.0.2"
MOCK_HOST_NAME = "Fake PS4"
MOCK_HOST_ID = "A0000A0AA000"
MOCK_HOST_VERSION = "09879011"
MOCK_HOST_TYPE = "PS4"
MOCK_STATUS_REST = "Server Standby"
MOCK_STATUS_ON = "Ok"
MOCK_STANDBY_CODE = 620
MOCK_ON_CODE = 200
MOCK_TCP_PORT = 997
MOCK_DDP_PORT = 987
MOCK_DDP_VERSION = "00020020"
# NOTE(review): kept as a string — datagram_received is fed this as the
# sender port; confirm whether an int is expected by the protocol.
MOCK_RANDOM_PORT = "1234"
# Fake title (game) metadata.
MOCK_TITLE_ID = "CUSA00000"
MOCK_TITLE_NAME = "Random Game"
MOCK_TITLE_TYPE = MEDIA_TYPE_GAME
MOCK_TITLE_ART_URL = "https://somecoverurl"
# Games-file entries: locked entries skip the PS Store fetch.
MOCK_GAMES_DATA = {
    ATTR_LOCKED: False,
    ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
    ATTR_MEDIA_IMAGE_URL: MOCK_TITLE_ART_URL,
    ATTR_MEDIA_TITLE: MOCK_TITLE_NAME,
}
MOCK_GAMES_DATA_LOCKED = {
    ATTR_LOCKED: True,
    ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
    ATTR_MEDIA_IMAGE_URL: MOCK_TITLE_ART_URL,
    ATTR_MEDIA_TITLE: MOCK_TITLE_NAME,
}
# Raw DDP status payloads for the three device states.
MOCK_STATUS_PLAYING = {
    "host-type": MOCK_HOST_TYPE,
    "host-ip": MOCK_HOST,
    "host-request-port": MOCK_TCP_PORT,
    "host-id": MOCK_HOST_ID,
    "host-name": MOCK_HOST_NAME,
    "running-app-titleid": MOCK_TITLE_ID,
    "running-app-name": MOCK_TITLE_NAME,
    "status": MOCK_STATUS_ON,
    "status_code": MOCK_ON_CODE,
    "device-discovery-protocol-version": MOCK_DDP_VERSION,
    "system-version": MOCK_HOST_VERSION,
}
MOCK_STATUS_IDLE = {
    "host-type": MOCK_HOST_TYPE,
    "host-ip": MOCK_HOST,
    "host-request-port": MOCK_TCP_PORT,
    "host-id": MOCK_HOST_ID,
    "host-name": MOCK_HOST_NAME,
    "status": MOCK_STATUS_ON,
    "status_code": MOCK_ON_CODE,
    "device-discovery-protocol-version": MOCK_DDP_VERSION,
    "system-version": MOCK_HOST_VERSION,
}
MOCK_STATUS_STANDBY = {
    "host-type": MOCK_HOST_TYPE,
    "host-ip": MOCK_HOST,
    "host-request-port": MOCK_TCP_PORT,
    "host-id": MOCK_HOST_ID,
    "host-name": MOCK_HOST_NAME,
    "status": MOCK_STATUS_REST,
    "status_code": MOCK_STANDBY_CODE,
    "device-discovery-protocol-version": MOCK_DDP_VERSION,
    "system-version": MOCK_HOST_VERSION,
}
# Config-entry payloads.
MOCK_DEVICE = {CONF_HOST: MOCK_HOST, CONF_NAME: MOCK_NAME, CONF_REGION: MOCK_REGION}
MOCK_ENTRY_ID = "SomeID"
MOCK_DEVICE_MODEL = "PlayStation 4"
MOCK_DATA = {CONF_TOKEN: MOCK_CREDS, "devices": [MOCK_DEVICE]}
MOCK_CONFIG = MockConfigEntry(domain=DOMAIN, data=MOCK_DATA, entry_id=MOCK_ENTRY_ID)
MOCK_LOAD = "homeassistant.components.ps4.media_player.load_games"
async def setup_mock_component(hass, entry=None):
    """Set up the mock media player and return its entity_id."""
    if entry is not None:
        config_entry = entry
    else:
        config_entry = MockConfigEntry(
            domain=ps4.DOMAIN, data=MOCK_DATA, version=VERSION, entry_id=MOCK_ENTRY_ID
        )
    config_entry.add_to_hass(hass)
    await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
    await hass.async_block_till_done()
    # Exactly one entity is created for the single mocked device.
    return hass.states.async_entity_ids()[0]
async def mock_ddp_response(hass, mock_status_data):
    """Feed a fake raw UDP status datagram into the DDP protocol."""
    protocol = hass.data[PS4_DATA].protocol
    assert protocol.local_port == DEFAULT_UDP_PORT
    status_code = mock_status_data.get("status_code")
    status_text = mock_status_data.get("status")
    header = f"{status_code} {status_text}"
    datagram = get_ddp_message(header, mock_status_data).encode()
    protocol.datagram_received(datagram, (MOCK_HOST, MOCK_RANDOM_PORT))
    await hass.async_block_till_done()
async def test_media_player_is_setup_correctly_with_entry(hass):
    """Test that the entity is set up correctly from a config entry."""
    entity_id = await setup_mock_component(hass)
    state = hass.states.get(entity_id).state
    protocol = hass.data[PS4_DATA].protocol
    # The entity registered its status-updated callback with the protocol.
    assert len(protocol.callbacks) == 1
    # The entity was added to hass.
    assert protocol is not None
    assert entity_id == f"media_player.{MOCK_NAME}"
    assert state == STATE_UNKNOWN
async def test_state_standby_is_set(hass):
    """Test that a standby DDP status puts the entity in standby."""
    entity_id = await setup_mock_component(hass)
    await mock_ddp_response(hass, MOCK_STATUS_STANDBY)
    state = hass.states.get(entity_id).state
    assert state == STATE_STANDBY
async def test_state_playing_is_set(hass):
    """Test that a playing DDP status puts the entity in playing."""
    entity_id = await setup_mock_component(hass)
    target = (
        "homeassistant.components.ps4.media_player."
        "pyps4.Ps4Async.async_get_ps_store_data"
    )
    # Skip the PS Store lookup; only the state transition matters here.
    with patch(target, return_value=None):
        await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
    assert hass.states.get(entity_id).state == STATE_PLAYING
async def test_state_idle_is_set(hass):
    """Test that an idle DDP status puts the entity in idle."""
    entity_id = await setup_mock_component(hass)
    await mock_ddp_response(hass, MOCK_STATUS_IDLE)
    state = hass.states.get(entity_id).state
    assert state == STATE_IDLE
async def test_state_none_is_set(hass):
    """Test that the state stays unknown without any status update."""
    entity_id = await setup_mock_component(hass)
    state = hass.states.get(entity_id).state
    assert state == STATE_UNKNOWN
async def test_media_attributes_are_fetched(hass):
    """Test that media attributes are fetched from the PS Store."""
    entity_id = await setup_mock_component(hass)
    target = (
        "homeassistant.components.ps4.media_player."
        "pyps4.Ps4Async.async_get_ps_store_data"
    )
    # Fake PS Store lookup result.
    store_result = MagicMock()
    store_result.name = MOCK_TITLE_NAME
    store_result.cover_art = MOCK_TITLE_ART_URL
    store_result.game_type = "not_an_app"
    with patch(target, return_value=store_result) as fetch:
        await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
    state = hass.states.get(entity_id)
    attrs = dict(state.attributes)
    assert len(fetch.mock_calls) == 1
    assert state.state == STATE_PLAYING
    sources = attrs.get(ATTR_INPUT_SOURCE_LIST)
    assert len(sources) == 1
    assert sources[0] == MOCK_TITLE_NAME
    assert attrs.get(ATTR_MEDIA_CONTENT_ID) == MOCK_TITLE_ID
    assert attrs.get(ATTR_MEDIA_TITLE) == MOCK_TITLE_NAME
    assert attrs.get(ATTR_MEDIA_CONTENT_TYPE) == MOCK_TITLE_TYPE
    # Change state so that the next fetch is triggered.
    await mock_ddp_response(hass, MOCK_STATUS_STANDBY)
    # An app title must yield the app content type.
    store_result.game_type = PS_TYPE_APP
    with patch(target, return_value=store_result) as fetch_app:
        await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
    state = hass.states.get(entity_id)
    attrs = dict(state.attributes)
    assert len(fetch_app.mock_calls) == 1
    assert attrs.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
async def test_media_attributes_are_loaded(hass, patch_load_json):
    """Test that media attributes are loaded from the games file."""
    entity_id = await setup_mock_component(hass)
    patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA_LOCKED}
    with patch(
        "homeassistant.components.ps4.media_player."
        "pyps4.Ps4Async.async_get_ps_store_data",
        return_value=None,
    ) as fetch:
        await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
    state = hass.states.get(entity_id)
    attrs = dict(state.attributes)
    # Locked entries come straight from the file, so nothing is fetched.
    assert not fetch.mock_calls
    assert state.state == STATE_PLAYING
    sources = attrs.get(ATTR_INPUT_SOURCE_LIST)
    assert len(sources) == 1
    assert sources[0] == MOCK_TITLE_NAME
    assert attrs.get(ATTR_MEDIA_CONTENT_ID) == MOCK_TITLE_ID
    assert attrs.get(ATTR_MEDIA_TITLE) == MOCK_TITLE_NAME
    assert attrs.get(ATTR_MEDIA_CONTENT_TYPE) == MOCK_TITLE_TYPE
async def test_device_info_is_set_from_status_correctly(hass, patch_get_status):
    """Test that device info is populated from a status update."""
    device_registry = mock_device_registry(hass)
    patch_get_status.return_value = MOCK_STATUS_STANDBY
    entity_id = await setup_mock_component(hass)
    await hass.async_block_till_done()
    # Reformat the raw system-version the same way the integration does.
    raw_version = MOCK_STATUS_STANDBY["system-version"][1:4]
    expected_version = f"{raw_version[0]}.{raw_version[1:]}"
    state = hass.states.get(entity_id).state
    device_entries = device_registry.devices
    device_entry = device_registry.async_get_device(identifiers={(DOMAIN, MOCK_HOST_ID)})
    assert state == STATE_STANDBY
    assert len(device_entries) == 1
    assert device_entry.name == MOCK_HOST_NAME
    assert device_entry.model == MOCK_DEVICE_MODEL
    assert device_entry.sw_version == expected_version
    assert device_entry.identifiers == {(DOMAIN, MOCK_HOST_ID)}
async def test_device_info_is_assummed(hass):
    """Test that device info is assumed if the device is unavailable."""
    # Pre-create a device registry entry with full device info.
    device_registry = mock_device_registry(hass)
    device_registry.async_get_or_create(
        config_entry_id=MOCK_ENTRY_ID,
        name=MOCK_HOST_NAME,
        model=MOCK_DEVICE_MODEL,
        identifiers={(DOMAIN, MOCK_HOST_ID)},
        sw_version=MOCK_HOST_VERSION,
    )
    assert len(device_registry.devices) == 1
    # Pre-create an entity registry entry that reuses the device identifiers.
    unique_id = ps4.format_unique_id(MOCK_CREDS, MOCK_HOST_ID)
    entity_registry = mock_registry(hass)
    entity_registry.async_get_or_create(
        "media_player", DOMAIN, unique_id, config_entry=MOCK_CONFIG
    )
    entity_id = entity_registry.async_get_entity_id(
        "media_player", DOMAIN, unique_id
    )
    entity_id = await setup_mock_component(hass)
    state = hass.states.get(entity_id).state
    # No status was received, so the state stays unknown.
    assert state == STATE_UNKNOWN
    # The freshly set up entity reuses the pre-existing entity_id.
    entity_ids = hass.states.async_entity_ids()
    assert len(entity_ids) == 1
    assert entity_ids[0] == entity_id
async def test_device_info_assummed_works(hass):
    """Reverse test that device info assumption works."""
    device_registry = mock_device_registry(hass)
    entity_id = await setup_mock_component(hass)
    state = hass.states.get(entity_id).state
    device_entries = device_registry.devices
    # No status was received, so the state stays unknown.
    assert state == STATE_UNKNOWN
    # With no status and no pre-existing entries, the registry stays empty.
    assert not device_entries
async def test_turn_on(hass):
    """Test that the turn_on service calls wakeup."""
    entity_id = await setup_mock_component(hass)
    target = "homeassistant.components.ps4.media_player.pyps4.Ps4Async.wakeup"
    with patch(target) as wakeup:
        await hass.services.async_call(
            "media_player", "turn_on", {ATTR_ENTITY_ID: entity_id}
        )
        await hass.async_block_till_done()
    assert len(wakeup.mock_calls) == 1
async def test_turn_off(hass):
    """Test that the turn_off service calls standby."""
    entity_id = await setup_mock_component(hass)
    target = "homeassistant.components.ps4.media_player.pyps4.Ps4Async.standby"
    with patch(target) as standby:
        await hass.services.async_call(
            "media_player", "turn_off", {ATTR_ENTITY_ID: entity_id}
        )
        await hass.async_block_till_done()
    assert len(standby.mock_calls) == 1
async def test_toggle(hass):
    """Test that the toggle service calls toggle."""
    entity_id = await setup_mock_component(hass)
    target = "homeassistant.components.ps4.media_player.pyps4.Ps4Async.toggle"
    with patch(target) as toggle:
        await hass.services.async_call(
            "media_player", "toggle", {ATTR_ENTITY_ID: entity_id}
        )
        await hass.async_block_till_done()
    assert len(toggle.mock_calls) == 1
async def test_media_pause(hass):
    """Test that the media_pause service calls remote_control."""
    entity_id = await setup_mock_component(hass)
    target = "homeassistant.components.ps4.media_player.pyps4.Ps4Async.remote_control"
    with patch(target) as remote_control:
        await hass.services.async_call(
            "media_player", "media_pause", {ATTR_ENTITY_ID: entity_id}
        )
        await hass.async_block_till_done()
    assert len(remote_control.mock_calls) == 1
async def test_media_stop(hass):
    """Test that the media_stop service calls remote_control."""
    entity_id = await setup_mock_component(hass)
    target = "homeassistant.components.ps4.media_player.pyps4.Ps4Async.remote_control"
    with patch(target) as remote_control:
        await hass.services.async_call(
            "media_player", "media_stop", {ATTR_ENTITY_ID: entity_id}
        )
        await hass.async_block_till_done()
    assert len(remote_control.mock_calls) == 1
async def test_select_source(hass, patch_load_json):
    """Test that select_source starts a title given its name."""
    patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA}
    with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_IDLE):
        entity_id = await setup_mock_component(hass)
    with patch("pyps4_2ndscreen.ps4.Ps4Async.start_title") as start_title, patch(
        "homeassistant.components.ps4.media_player.PS4Device.async_update"
    ):
        # Select by title name.
        await hass.services.async_call(
            "media_player",
            "select_source",
            {ATTR_ENTITY_ID: entity_id, ATTR_INPUT_SOURCE: MOCK_TITLE_NAME},
            blocking=True,
        )
    assert len(start_title.mock_calls) == 1
async def test_select_source_caps(hass, patch_load_json):
    """Test that select_source matches title names case-insensitively."""
    patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA}
    with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_IDLE):
        entity_id = await setup_mock_component(hass)
    with patch("pyps4_2ndscreen.ps4.Ps4Async.start_title") as start_title, patch(
        "homeassistant.components.ps4.media_player.PS4Device.async_update"
    ):
        # Select by title name written in upper case.
        await hass.services.async_call(
            "media_player",
            "select_source",
            {
                ATTR_ENTITY_ID: entity_id,
                ATTR_INPUT_SOURCE: MOCK_TITLE_NAME.upper(),
            },
            blocking=True,
        )
    assert len(start_title.mock_calls) == 1
async def test_select_source_id(hass, patch_load_json):
    """Test that select_source starts a title given its title ID."""
    patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA}
    with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_IDLE):
        entity_id = await setup_mock_component(hass)
    with patch("pyps4_2ndscreen.ps4.Ps4Async.start_title") as start_title, patch(
        "homeassistant.components.ps4.media_player.PS4Device.async_update"
    ):
        # Select by title ID.
        await hass.services.async_call(
            "media_player",
            "select_source",
            {ATTR_ENTITY_ID: entity_id, ATTR_INPUT_SOURCE: MOCK_TITLE_ID},
            blocking=True,
        )
    assert len(start_title.mock_calls) == 1
async def test_ps4_send_command(hass):
    """Test that the ps4.send_command service calls remote_control."""
    entity_id = await setup_mock_component(hass)
    with patch("pyps4_2ndscreen.ps4.Ps4Async.remote_control") as remote_control:
        await hass.services.async_call(
            DOMAIN,
            "send_command",
            {ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: "ps"},
            blocking=True,
        )
    assert len(remote_control.mock_calls) == 1
async def test_entry_is_unloaded(hass):
    """Test that the config entry is unloaded cleanly."""
    config_entry = MockConfigEntry(
        domain=ps4.DOMAIN, data=MOCK_DATA, version=VERSION, entry_id=MOCK_ENTRY_ID
    )
    entity_id = await setup_mock_component(hass, config_entry)
    unloaded = await ps4.async_unload_entry(hass, config_entry)
    assert unloaded is True
    assert not hass.data[PS4_DATA].devices
    # The entity's status callback must be removed from the protocol.
    assert not hass.data[PS4_DATA].protocol.callbacks
    assert hass.states.get(entity_id) is None
|
|
# Imports
from machine import I2C
# Register pointers (MCP9808 register map; const() is the MicroPython
# compile-time constant helper).
REG_CONFIG = const(1)
REG_TEMP_BOUNDARY_UPPER = const(2)
REG_TEMP_BOUNDARY_LOWER = const(3)
REG_TEMP_BOUNDARY_CRITICAL = const(4)
REG_TEMP = const(5)  # ambient temperature register
REG_MANUFACTURER_ID = const(6)
# NOTE: "DEVIDE" is a historical typo kept for backward compatibility.
REG_DEVIDE_ID = const(7)
REG_RESOLUTION = const(8)
# Sensor resolution values
TEMP_RESOLUTION_MIN = const(0)  # +0.5 C, refresh rate 30 ms
TEMP_RESOLUTION_LOW = const(1)  # +0.25 C, refresh rate 65 ms
TEMP_RESOLUTION_AVG = const(2)  # +0.125 C, refresh rate 130 ms
TEMP_RESOLUTION_MAX = const(3)  # +0.0625 C, refresh rate 250 ms [Default]
# Alert selectors
ALERT_SELECT_ALL = const(0)  # ambient > upper || ambient > critical || ambient < lower [Default]
ALERT_SELECT_CRIT = const(1)  # Ambient temp > critical
# Alert polarity
ALERT_POLARITY_ALOW = const(0)  # Active-low, requires pull-up [Default]
ALERT_POLARITY_AHIGH = const(1)  # Active-high
# Alert output mode
ALERT_OUTPUT_COMPARATOR = const(0)
ALERT_OUTPUT_INTERRUPT = const(1)
class MCP9808(object):
    """
    Interface to the MCP9808 temperature sensor from Microchip.

    Supports both the ``machine.I2C`` API (``writeto``/``readfrom``) and
    the PyBoard ``pyb.I2C`` API (``send``/``recv``).
    """
    def __init__(self, i2c=None, addr=0x18):
        """
        Initialize a sensor object on the given I2C bus and accessed by the
        given address.

        Raises ValueError unless a machine.I2C instance is supplied.
        """
        # isinstance() also accepts I2C subclasses (the original class
        # comparison rejected them); anything else is refused up front.
        if i2c is None or not isinstance(i2c, I2C):
            raise ValueError('I2C object needed as argument!')
        self._i2c = i2c
        self._addr = addr
        self._check_device()
    def _send(self, buf):
        """
        Sends the given buffer object over I2C to the sensor.
        """
        if hasattr(self._i2c, "writeto"):
            # Micropython
            self._i2c.writeto(self._addr, buf)
        elif hasattr(self._i2c, "send"):
            # PyBoard Micropython: pyb.I2C.send(send, addr=...) takes the
            # data first — the original call had the arguments swapped.
            self._i2c.send(buf, self._addr)
        else:
            raise Exception("Invalid I2C object. Unknown Micropython/platform?")
    def _recv(self, n):
        """
        Read bytes from the sensor using I2C. The byte count must be specified
        as an argument.
        Returns a bytearray containing the result.
        """
        if hasattr(self._i2c, "writeto"):
            # Micropython (PyCom)
            return self._i2c.readfrom(self._addr, n)
        elif hasattr(self._i2c, "send"):
            # PyBoard Micropython
            return self._i2c.recv(n, self._addr)
        else:
            raise Exception("Invalid I2C object. Unknown Micropython/platform?")
    def _select_register(self, reg):
        """
        Set the sensor's register pointer to *reg*.

        The pointer is sent as a one-byte buffer: machine.I2C.writeto()
        requires a buffer object, so passing the bare int (as the original
        code did) only worked on ports that special-case integers.
        """
        self._send(bytearray([reg]))
    def _check_device(self):
        """
        Verify the manufacturer and device identifiers so we know we are
        really talking to an MCP9808. Raises on mismatch.
        """
        self._select_register(REG_MANUFACTURER_ID)
        self._m_id = self._recv(2)
        if self._m_id != b'\x00T':  # 0x0054 = Microchip
            raise Exception("Invalid manufacturer ID: '%s'!" % self._m_id)
        self._select_register(REG_DEVIDE_ID)
        self._d_id = self._recv(2)
        if self._d_id != b'\x04\x00':
            raise Exception("Invalid device or revision ID: '%s'!" % self._d_id)
    def set_shutdown_mode(self, shdn=True):
        """
        Set sensor into shutdown mode to draw less than 1 uA and disable
        continuous temperature conversion.
        """
        if not isinstance(shdn, bool):
            raise ValueError('Boolean argument needed to set shutdown mode!')
        self._select_register(REG_CONFIG)
        cfg = self._recv(2)
        b = bytearray()
        b.append(REG_CONFIG)
        # The SHDN bit is bit 8 of the config register, i.e. bit 0 of the
        # MSB byte; the LSB byte is written back unchanged.
        if shdn:
            b.append(cfg[0] | 1)
        else:
            b.append(cfg[0] & ~1)
        b.append(cfg[1])
        self._send(b)
    def set_alert_mode(self, enable_alert=True, output_mode=ALERT_OUTPUT_INTERRUPT, polarity=ALERT_POLARITY_ALOW, selector=ALERT_SELECT_ALL):
        """
        Set sensor into alert mode with the provided output,
        polarity and selector parameters.
        If output mode is set to interrupt, a call to acknowledge_alert_irq()
        is required to deassert the MCP9808.
        """
        if not isinstance(enable_alert, bool):
            raise ValueError('Boolean argument needed to set alert mode!')
        if output_mode not in [ALERT_OUTPUT_COMPARATOR, ALERT_OUTPUT_INTERRUPT]:
            raise ValueError("Invalid output mode set.")
        if selector not in [ALERT_SELECT_ALL, ALERT_SELECT_CRIT]:
            raise ValueError("Invalid alert selector set.")
        if polarity not in [ALERT_POLARITY_ALOW, ALERT_POLARITY_AHIGH]:
            raise ValueError("Invalid alert polarity set.")
        enable_alert = 1 if enable_alert else 0
        self._select_register(REG_CONFIG)
        cfg = self._recv(2)
        # Alert config lives in the low nibble of the config LSB:
        # bit0 mode, bit1 polarity, bit2 selector, bit3 enable.
        alert_bits = (output_mode | (polarity << 1) | (selector << 2) | (enable_alert << 3)) & 0xF
        lsb_data = (cfg[1] & 0xF0) | alert_bits
        b = bytearray()
        b.append(REG_CONFIG)
        b.append(cfg[0])
        b.append(lsb_data)
        self._send(b)
    def acknowledge_alert_irq(self):
        """
        Clear a pending alert interrupt. Must be called if the MCP9808 is
        operating in interrupt output mode.
        """
        self._select_register(REG_CONFIG)
        cfg = self._recv(2)
        b = bytearray()
        b.append(REG_CONFIG)
        b.append(cfg[0])  # MSB data
        b.append(cfg[1] | 0x20)  # LSB data with interrupt clear bit set
        self._send(b)
    def set_alert_boundary_temp(self, boundary_register, value):
        """
        Sets the alert boundary for the requested boundary register.

        The value is encoded as the register's 11-bit two's complement
        fixed-point format with 0.25 C resolution (bits 12..2).
        """
        if boundary_register not in [REG_TEMP_BOUNDARY_LOWER, REG_TEMP_BOUNDARY_UPPER, REG_TEMP_BOUNDARY_CRITICAL]:
            raise ValueError("Given alert boundary register is not valid!")
        if value < -128 or value > 127:  # 8 bit two's complement
            raise ValueError("Temperature out of range [-128, 127]")
        integral = int(value)
        frac = abs(value - integral)
        if integral < 0:
            # Two's complement of the integral part (9 bits incl. sign).
            integral = (1 << 9) + integral
        integral = ((integral & 0x1FF) << 4)
        # Quantize the fraction to two bits (0.5 C and 0.25 C) at bits 3..2.
        frac = (((1 if frac * 2 >= 1 else 0) << 1) + (1 if (frac * 2 - int(frac * 2)) * 2 >= 1 else 0)) << 2
        twos_value = (integral + frac if value >= 0 else integral - frac) & 0x1ffc
        b = bytearray()
        b.append(boundary_register)
        b.append((twos_value & 0xFF00) >> 8)
        b.append(twos_value & 0xFF)
        self._send(b)
    def set_resolution(self, r):
        """
        Sets the temperature resolution (one of the TEMP_RESOLUTION_*
        constants).
        """
        if r not in [TEMP_RESOLUTION_MIN, TEMP_RESOLUTION_LOW, TEMP_RESOLUTION_AVG, TEMP_RESOLUTION_MAX]:
            raise ValueError('Invalid temperature resolution requested!')
        b = bytearray()
        b.append(REG_RESOLUTION)
        b.append(r)
        self._send(b)
    def get_temp(self):
        """
        Read temperature in degree celsius and return float value.
        """
        self._select_register(REG_TEMP)
        raw = self._recv(2)
        u = (raw[0] & 0x0f) << 4  # upper 4 integral bits
        l = raw[1] / 16  # lower 4 integral bits + 4 fractional bits
        if raw[0] & 0x10 == 0x10:  # sign bit set -> negative temperature
            temp = (u + l) - 256
        else:
            temp = u + l
        return temp
    def get_temp_int(self):
        """
        Read a temperature in degree celsius and return a tuple of two parts.
        The first part is the decimal part and the second the fractional part
        of the value.
        This method does avoid floating point arithmetic completely to support
        platforms missing float support.
        """
        self._select_register(REG_TEMP)
        raw = self._recv(2)
        u = (raw[0] & 0xf) << 4
        l = raw[1] >> 4
        if raw[0] & 0x10 == 0x10:  # sign bit set -> negative temperature
            temp = (u + l) - 256
            frac = -((raw[1] & 0x0f) * 100 >> 4)
        else:
            temp = u + l
            frac = (raw[1] & 0x0f) * 100 >> 4
        return temp, frac
    def _debug_config(self, cfg=None):
        """
        Prints the first 9 bits of the config register mapped to human
        readable descriptions. Reads the register when *cfg* is not given.
        """
        if not cfg:
            self._select_register(REG_CONFIG)
            cfg = self._recv(2)
        # meanings[a][b] with a the bit index (LSB order),
        # b=0 the config description and b={bit value}+1 the value description
        meanings = [
            ["Alert output mode", "Comparator", "Interrupt"],
            ["Alert polarity", "Active-low", "Active-high"],
            ["Alert Selector", "All", "Only Critical"],
            ["Alert enabled", "False", "True"],
            ["Alert status", "Not asserted", "Asserted as set by mode"],
            ["Interrupt clear bit", "0", "1"],
            ["Window [low, high] locked", "Unlocked", "Locked"],
            ["Critical locked", "Unlocked", "Locked"],
            ["Shutdown", "False", "True"]
        ]
        print("Raw config: {}".format(str(cfg)))
        for i in range(0, min(len(meanings), len(cfg)*8)):
            # cfg[1] holds bits 0..7 (LSB), cfg[0] holds bits 8+ (MSB).
            part = 0 if i > 7 else 1
            value = 1 if (cfg[part] & (2**(i % 8))) > 0 else 0
            print(meanings[i][0] + ": " + meanings[i][1 + value])
|
|
from __future__ import unicode_literals
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.encoding import python_2_unicode_compatible
from taggit.models import TaggedItemBase
from taggit.managers import TaggableManager
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from modelcluster.contrib.taggit import ClusterTaggableManager
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailcore.blocks import CharBlock, RichTextBlock
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel, InlinePanel, PageChooserPanel, TabbedInterface, ObjectList
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsearch import index
from wagtail.wagtailimages.models import AbstractImage, Image
from wagtail.wagtailimages.blocks import ImageChooserBlock
# Choices for EventPage.audience: (stored value, display label).
EVENT_AUDIENCE_CHOICES = (
    ('public', "Public"),
    ('private', "Private"),
)
# Promote-tab panels shared by several page types below.
COMMON_PANELS = (
    FieldPanel('slug'),
    FieldPanel('seo_title'),
    FieldPanel('show_in_menus'),
    FieldPanel('search_description'),
)
# Link fields
class LinkFields(models.Model):
    """Abstract mixin giving a model a link to an external URL, a page or a document."""
    link_external = models.URLField("External link", blank=True)
    # NOTE(review): these ForeignKeys omit on_delete, which only works on
    # Django < 2.0 (where it defaults to CASCADE) — confirm target version.
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        related_name='+'
    )
    link_document = models.ForeignKey(
        'wagtaildocs.Document',
        null=True,
        blank=True,
        related_name='+'
    )
    @property
    def link(self):
        # Precedence: page link, then document link, then external URL.
        if self.link_page:
            return self.link_page.url
        elif self.link_document:
            return self.link_document.url
        else:
            return self.link_external
    panels = [
        FieldPanel('link_external'),
        PageChooserPanel('link_page'),
        DocumentChooserPanel('link_document'),
    ]
    class Meta:
        abstract = True
# Carousel items
class CarouselItem(LinkFields):
    """Abstract carousel entry: an image or embed with a caption plus link fields."""
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    embed_url = models.URLField("Embed URL", blank=True)
    caption = models.CharField(max_length=255, blank=True)
    panels = [
        ImageChooserPanel('image'),
        FieldPanel('embed_url'),
        FieldPanel('caption'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]
    class Meta:
        abstract = True
# Related links
class RelatedLink(LinkFields):
    """Abstract titled link, to be attached to pages via an Orderable subclass."""
    title = models.CharField(max_length=255, help_text="Link title")
    panels = [
        FieldPanel('title'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]
    class Meta:
        abstract = True
# Simple page
class SimplePage(Page):
    """Minimal concrete page type with a single plain-text body field."""
    content = models.TextField()
class PageWithOldStyleRouteMethod(Page):
    """
    Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse
    rather than a Page instance. As subclasses of Page may override route,
    we need to continue accepting this convention (albeit as a deprecated API).
    """
    content = models.TextField()
    template = 'tests/simple_page.html'
    def route(self, request, path_components):
        # Old-style: serve directly instead of returning a RouteResult.
        return self.serve(request)
# Event page
class EventPageCarouselItem(Orderable, CarouselItem):
    """Orderable carousel item belonging to an EventPage."""
    page = ParentalKey('tests.EventPage', related_name='carousel_items')
class EventPageRelatedLink(Orderable, RelatedLink):
    """Orderable related link belonging to an EventPage."""
    page = ParentalKey('tests.EventPage', related_name='related_links')
class EventPageSpeaker(Orderable, LinkFields):
    """Orderable speaker entry (name, image, link) belonging to an EventPage."""
    page = ParentalKey('tests.EventPage', related_name='speakers')
    first_name = models.CharField("Name", max_length=255, blank=True)
    last_name = models.CharField("Surname", max_length=255, blank=True)
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    @property
    def name_display(self):
        # Simple "First Last" concatenation; both parts may be blank.
        return self.first_name + " " + self.last_name
    panels = [
        FieldPanel('first_name'),
        FieldPanel('last_name'),
        ImageChooserPanel('image'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]
class EventPage(Page):
    """Event page with dates, audience, location and inline child items."""
    date_from = models.DateField("Start date", null=True)
    date_to = models.DateField(
        "End date",
        null=True,
        blank=True,
        help_text="Not required if event is on a single day"
    )
    time_from = models.TimeField("Start time", null=True, blank=True)
    time_to = models.TimeField("End time", null=True, blank=True)
    audience = models.CharField(max_length=255, choices=EVENT_AUDIENCE_CHOICES)
    location = models.CharField(max_length=255)
    body = RichTextField(blank=True)
    cost = models.CharField(max_length=255)
    signup_link = models.URLField(blank=True)
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    # Index the display label of the audience choice, not its raw value.
    search_fields = (
        index.SearchField('get_audience_display'),
        index.SearchField('location'),
        index.SearchField('body'),
    )
    # Custom template shown when the page is password-protected.
    password_required_template = 'tests/event_page_password_required.html'
# Panel configuration is assigned after the class definition (pre-Wagtail-1.0
# convention used throughout this module).
EventPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('date_from'),
    FieldPanel('date_to'),
    FieldPanel('time_from'),
    FieldPanel('time_to'),
    FieldPanel('location'),
    FieldPanel('audience'),
    FieldPanel('cost'),
    FieldPanel('signup_link'),
    InlinePanel('carousel_items', label="Carousel items"),
    FieldPanel('body', classname="full"),
    InlinePanel('speakers', label="Speakers"),
    InlinePanel('related_links', label="Related links"),
]
EventPage.promote_panels = [
    MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
    ImageChooserPanel('feed_image'),
]
# Just to be able to test multi table inheritance
class SingleEventPage(EventPage):
    """Concrete EventPage subclass used to exercise multi-table inheritance."""
    excerpt = models.TextField(max_length=255, blank=True, null=True, help_text="Short text to describe what is this action about")
# Prepend the excerpt field to the inherited panel list.
SingleEventPage.content_panels = [FieldPanel('excerpt')] + EventPage.content_panels
# Event index (has a separate AJAX template, and a custom template context)
class EventIndex(Page):
    """Paginated index of EventPage children with a separate AJAX template."""
    intro = RichTextField(blank=True)
    ajax_template = 'tests/includes/event_listing.html'
    def get_events(self):
        # Only live EventPage children.
        return self.get_children().live().type(EventPage)
    def get_paginator(self):
        # Four events per page.
        return Paginator(self.get_events(), 4)
    def get_context(self, request, page=1):
        # NOTE(review): adds a `page` kwarg on top of Page.get_context's
        # signature; it is supplied by route() below.
        # Pagination
        paginator = self.get_paginator()
        try:
            events = paginator.page(page)
        except PageNotAnInteger:
            events = paginator.page(1)
        except EmptyPage:
            events = paginator.page(paginator.num_pages)
        # Update context
        context = super(EventIndex, self).get_context(request)
        context['events'] = events
        return context
    def route(self, request, path_components):
        # Accept a single trailing path component as a page number,
        # e.g. /events/2/; anything else falls through to default routing.
        if self.live and len(path_components) == 1:
            try:
                return self.serve(request, page=int(path_components[0]))
            except (TypeError, ValueError):
                pass
        return super(EventIndex, self).route(request, path_components)
    def get_static_site_paths(self):
        # Get page count
        page_count = self.get_paginator().num_pages
        # Yield a path for each page
        for page in range(page_count):
            yield '/%d/' % (page + 1)
        # Yield from superclass
        for path in super(EventIndex, self).get_static_site_paths():
            yield path
    def get_sitemap_urls(self):
        # Add past events url to sitemap
        return super(EventIndex, self).get_sitemap_urls() + [
            {
                'location': self.full_url + 'past/',
                'lastmod': self.latest_revision_created_at
            }
        ]
# Editor panels for EventIndex.
EventIndex.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('intro', classname="full"),
]
class FormField(AbstractFormField):
    """Single form field definition belonging to a FormPage."""
    page = ParentalKey('FormPage', related_name='form_fields')
class FormPage(AbstractEmailForm):
    """Email form page; injects an extra value into the template context."""
    def get_context(self, request):
        context = super(FormPage, self).get_context(request)
        context['greeting'] = "hello world"
        return context
# Editor panels for FormPage: fields plus the email settings group.
FormPage.content_panels = [
    FieldPanel('title', classname="full title"),
    InlinePanel('form_fields', label="Form fields"),
    MultiFieldPanel([
        FieldPanel('to_address', classname="full"),
        FieldPanel('from_address', classname="full"),
        FieldPanel('subject', classname="full"),
    ], "Email")
]
# Snippets
class AdvertPlacement(models.Model):
    """Through-model placing an Advert snippet on a page with a colour."""
    page = ParentalKey('wagtailcore.Page', related_name='advert_placements')
    advert = models.ForeignKey('tests.Advert', related_name='+')
    colour = models.CharField(max_length=255)
class AdvertTag(TaggedItemBase):
    """Tag through-model for Advert (taggit)."""
    content_object = ParentalKey('Advert', related_name='tagged_items')
@python_2_unicode_compatible
class Advert(ClusterableModel):
    """Taggable advert snippet with an optional URL and display text."""
    url = models.URLField(null=True, blank=True)
    text = models.CharField(max_length=255)
    tags = TaggableManager(through=AdvertTag, blank=True)
    panels = [
        FieldPanel('url'),
        FieldPanel('text'),
        FieldPanel('tags'),
    ]
    def __str__(self):
        return self.text
# Expose Advert in the Wagtail snippets admin.
register_snippet(Advert)
class StandardIndex(Page):
    """ Index for the site, not allowed to be placed anywhere """
    # Empty list means no valid parents, i.e. not creatable in the tree.
    parent_page_types = []
# A custom panel setup where all Promote fields are placed in the Content tab instead;
# we use this to test that the 'promote' tab is left out of the output when empty
StandardIndex.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('seo_title'),
    FieldPanel('slug'),
    InlinePanel('advert_placements', label="Adverts"),
]
# Deliberately empty — see the comment above.
StandardIndex.promote_panels = []
class StandardChild(Page):
    """Plain page type; customized below via an explicit edit_handler."""
    pass
# Test overriding edit_handler with a custom one
StandardChild.edit_handler = TabbedInterface([
    ObjectList(StandardChild.content_panels, heading='Content'),
    ObjectList(StandardChild.promote_panels, heading='Promote'),
    ObjectList(StandardChild.settings_panels, heading='Settings', classname='settings'),
    # An extra, empty tab to verify custom headings render.
    ObjectList([], heading='Dinosaurs'),
])
class BusinessIndex(Page):
    """ Can be placed anywhere, can only have Business children """
    subpage_types = ['tests.BusinessChild', 'tests.BusinessSubIndex']
class BusinessSubIndex(Page):
    """ Can be placed under BusinessIndex, and have BusinessChild children """
    subpage_types = ['tests.BusinessChild']
    parent_page_types = ['tests.BusinessIndex']
class BusinessChild(Page):
    """ Can only be placed under Business indexes, no children allowed """
    subpage_types = []
    # Mixes a string reference and a direct class reference — presumably
    # deliberate here to exercise both accepted forms; confirm before changing.
    parent_page_types = ['tests.BusinessIndex', BusinessSubIndex]
class TaggedPageTag(TaggedItemBase):
    """Tag through-model for TaggedPage."""
    content_object = ParentalKey('tests.TaggedPage', related_name='tagged_items')
class TaggedPage(Page):
    """Page type carrying a cluster-aware tag manager."""
    tags = ClusterTaggableManager(through=TaggedPageTag, blank=True)

TaggedPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('tags'),
]
class PageChooserModel(models.Model):
    """Non-page model holding a ForeignKey to any Page."""
    page = models.ForeignKey('wagtailcore.Page', help_text='help text')
class EventPageChooserModel(models.Model):
    """Non-page model holding a ForeignKey restricted to EventPage."""
    page = models.ForeignKey('tests.EventPage', help_text='more help text')
class SnippetChooserModel(models.Model):
    """Model with a snippet chooser pointing at Advert."""
    advert = models.ForeignKey(Advert, help_text='help text')

    panels = [
        SnippetChooserPanel('advert', Advert),
    ]
class CustomImage(AbstractImage):
    """Custom image model adding a caption plus a field excluded from forms."""
    caption = models.CharField(max_length=255)
    # Intentionally NOT listed in admin_form_fields below.
    not_editable_field = models.CharField(max_length=255)

    admin_form_fields = Image.admin_form_fields + (
        'caption',
    )
class StreamModel(models.Model):
    """Plain (non-page) model with a StreamField body."""
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])
class StreamPage(Page):
    """Page type with a StreamField body, exposed through the API."""
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])

    api_fields = ('body',)
class MTIBasePage(Page):
    """Multi-table-inheritance base page that cannot itself be created."""
    is_creatable = False
class MTIChildPage(MTIBasePage):
    # Should be creatable by default, no need to set anything
    pass
class AbstractPage(Page):
    """Abstract subclass of Page; never instantiated directly."""
    class Meta:
        abstract = True
|
|
from biicode.client.rest.rest_api import RestApiClient
from biicode.common.utils.serializer import Serializer, ListDeserializer
from biicode.common.exception import BiiServiceException
from biicode.common.model.symbolic.block_version_table import BlockVersionTable
from biicode.common.find.finder_result import FinderResult
from biicode.common.utils.bii_logging import logger
from biicode.common.model.symbolic.block_version import BlockVersion
from biicode.common.model.renames import Renames
from biicode.common.model.symbolic.reference import ReferencedResources
from biicode.common.rest.rest_return_mapping import getExceptionFromHttpError
from biicode.common.api.biiapi import BiiAPI
from biicode.common.settings.osinfo import OSInfo
from biicode.common.model.server_info import ServerInfo
from biicode.common.model.cells import CellDeserializer
from biicode.common.model.content import ContentDeserializer
from biicode.common.model.brl.block_cell_name import BlockCellName
from biicode.common.diffmerge.changes import ChangesDeserializer
from biicode.common.model.brl.cell_name import CellName
from biicode.common.model.resource import ResourceDeserializer
from biicode.common.model.block_delta import BlockDelta
from biicode.common.model.block_info import BlockInfo
from biicode.client.rest.rest_api import JWTAuth
from requests.auth import HTTPBasicAuth
from biicode.common.utils.bson_encoding import decode_bson, encode_bson
from biicode.common.api.ui import BiiResponse
class BiiRestApiClient(RestApiClient, BiiAPI):
    """Communication with the server remote REST API.

    Full satisfaction of the BiiAPI interface is not necessary here:
    it is fully implemented in BiiApiAuthManager.
    """

    version = "v1"

    # Route table: logical function name -> URL pattern and HTTP method.
    # ":name" segments in a pattern are substituted from url_params.
    authorized_functions = {
        'get_published_resources': {'pattern': '/get_published_resources', 'method': "POST"},
        'publish': {'pattern': '/publish', 'method': "POST"},
        'upload': {'pattern': '/upload', 'method': "POST"},
        'require_auth': {'pattern': '/require_auth', 'method': "GET"},
        'get_dep_table': {'pattern': '/users/:user_name/blocks/:block_name/branches/:branch_name/versions/:version/block_version_table/',
                          'method': "GET"},
        'get_cells_snapshot': {'pattern': '/cells_snapshot', 'method': "POST"},
        'find': {'pattern': '/finder_result', 'method': "POST"},
        'diff': {'pattern': '/diff', 'method': "POST"},
        'get_renames': {'pattern': '/renames', 'method': "POST"},
        'get_block_info': {'pattern': '/users/:user_name/blocks/:block_name/branches/:branch_name/info', 'method': "GET"},
        'get_server_info': {'pattern': '/get_server_info', 'method': "POST"},
        'authenticate': {'pattern': '/authenticate', 'method': "GET"},  # Sends user and password by basic http, other methods sends user + token
        'get_version_delta_info': {'pattern': '/users/:user_name/blocks/:block_name/branches/:branch_name/version/:version/delta_info', 'method': "GET"},
        'get_version_by_tag': {'pattern': '/users/:user_name/blocks/:block_name/branches/:branch_name/tag/:tag', 'method': "GET"},
    }

    def __init__(self, base_url):
        self.base_url = base_url
        self.token = None  # Anonymous until set (e.g. after authenticate())
        self.custom_headers = {}  # Extra headers added to every request
        logger.debug("Init rest api client pointing to: %s" % self.base_url)
        super(BiiRestApiClient, self).__init__(
            self.base_url + "/" + BiiRestApiClient.version,
            self.authorized_functions)

    ################### REST METHODS ########################
    def get_published_resources(self, references):
        """Fetch published resources for the given references."""
        serialized_data = Serializer().build(("data", references))
        return self.bson_jwt_call('get_published_resources', data=serialized_data,
                                  deserializer=ReferencedResources)

    def publish(self, publish_request):
        """Publish a block; returns the resulting BlockVersion."""
        data = Serializer().build(("data", publish_request))
        return self.bson_jwt_call('publish', data=data, deserializer=BlockVersion)

    def get_dep_table(self, block_version):
        """Return the BlockVersionTable (dependency table) of a block version."""
        block, time, _ = block_version
        owner_name = block.owner
        block_name = block.block_name
        branch_name = block.branch
        params = {"user_name": owner_name, "block_name": block_name,
                  "branch_name": branch_name, "version": time}
        deserializer = BlockVersionTable
        return self.bson_jwt_call('get_dep_table', url_params=params, deserializer=deserializer)

    def get_cells_snapshot(self, block_version):
        """Return the list of CellNames in the given block version."""
        data = Serializer().build(("data", block_version))
        return self.bson_jwt_call('get_cells_snapshot', data=data,
                                  deserializer=ListDeserializer(CellName))

    def find(self, finder_request, response):
        """Run a finder request; server messages are folded into `response`."""
        data = Serializer().build(("data", finder_request))
        return self.bson_jwt_call('find', data=data, deserializer=FinderResult, response=response)

    def diff(self, base, other):
        """Return the Changes between two block states."""
        data = Serializer().build(("base", base),
                                  ("other", other))
        values_deserializer = ResourceDeserializer(CellDeserializer(BlockCellName),
                                                   ContentDeserializer(BlockCellName))
        deserializer = ChangesDeserializer(CellName, values_deserializer)
        return self.bson_jwt_call('diff', data=data, deserializer=deserializer)

    def get_renames(self, brl_block, t1, t2):
        """Return the Renames of a block between times t1 and t2."""
        data = Serializer().build(("block", brl_block),
                                  ("t1", t1),
                                  ("t2", t2))
        return self.bson_jwt_call('get_renames', data=data, deserializer=Renames)

    def get_block_info(self, brl_block):
        """Return the BlockInfo for the given block reference."""
        owner_name = brl_block.owner
        block_name = brl_block.block_name
        branch_name = brl_block.branch
        url_params = {"user_name": owner_name,
                      "block_name": block_name,
                      "branch_name": branch_name}
        return self.bson_jwt_call('get_block_info', url_params=url_params, deserializer=BlockInfo)

    def get_version_delta_info(self, block_version):
        """Returns the last blockversion"""
        brl, time, _ = block_version
        url_params = {"user_name": brl.owner,
                      "block_name": brl.block_name,
                      "branch_name": brl.branch,
                      "version": time
                      }
        return self.bson_jwt_call('get_version_delta_info',
                                  url_params=url_params, deserializer=BlockDelta)

    def get_version_by_tag(self, brl_block, version_tag):
        """Given a BlockVersion that has a tag but not a time returns a complete BlockVersion"""
        assert version_tag is not None
        url_params = {"user_name": brl_block.owner,
                      "block_name": brl_block.block_name,
                      "branch_name": brl_block.branch,
                      "tag": version_tag
                      }
        return self.bson_jwt_call('get_version_by_tag',
                                  url_params=url_params, deserializer=BlockVersion)

    def get_server_info(self):
        """Gets a ServerInfo and sends os_info + client version to server"""
        os_info = OSInfo.capture()
        from biicode.common import __version__
        data = (os_info, str(__version__))
        serialized_data = Serializer().build(("data", data))
        info = self.bson_jwt_call('get_server_info', data=serialized_data,
                                  deserializer=ServerInfo, timeout=1)
        return info

    def require_auth(self):
        """Ping an endpoint that requires authentication."""
        info = self.bson_jwt_call('require_auth')
        return info

    def authenticate(self, user, password):
        '''Sends user + password to get a token'''
        token = self.basic_auth_call(user, password, "authenticate")
        return token
    ################### END REST METHODS ########################

    def bson_jwt_call(self, function_name, deserializer=None, url_params=None, data=None,
                      headers=None, response=None, timeout=None):
        """BSON-encoded call, authenticated with the JWT token if present.

        Fixed: url_params previously defaulted to a shared mutable dict ({}),
        which any callee mutation would leak across calls; now a None sentinel.
        """
        # If we dont have token, send without jwtauth (anonymous)
        logger.debug("JWT Call %s" % str(function_name))
        auth = JWTAuth(self.token) if self.token else None
        url_params = url_params or {}
        headers = headers or {}
        headers.update(self.custom_headers)
        headers['Content-Type'] = 'application/bson'
        if data is not None:
            # NOTE(review): str() over BSON bytes assumes Python 2 (str == bytes);
            # revisit for a Python 3 port.
            data = str(encode_bson(data))
        return self.call(function_name, url_params=url_params, data=data, headers=headers,
                         auth=auth, deserializer=deserializer, response=response, timeout=timeout)

    def basic_auth_call(self, user, password, function_name, url_params=None,
                        data=None, headers=None, deserializer=None):
        """Call authenticated with HTTP Basic (used only for 'authenticate').

        Fixed: same mutable-default url_params issue as bson_jwt_call.
        """
        auth = HTTPBasicAuth(user, password)
        url_params = url_params or {}
        return self.call(function_name, url_params=url_params,
                         data=data, headers=headers, auth=auth,
                         deserializer=deserializer)

    def call(self, *args, **kwargs):
        """Delegate to RestApiClient.call, then deserialize the raw response."""
        deserializer = kwargs.pop("deserializer", None)
        response = kwargs.pop("response", None)
        ret = super(BiiRestApiClient, self).call(*args, **kwargs)
        return BiiRestApiClient.deserialize_return(ret, deserializer, response)

    @staticmethod
    def decode_return_content(res, response=None):
        """Decode the HTTP body: BSON payloads yield their "return" field and
        fold server messages into `response`; anything else is returned raw."""
        if 'content-type' in res.headers and res.headers['content-type'] == "application/bson":
            tmp = decode_bson(res.content)
            if response is not None:
                response_server = BiiResponse.deserialize(tmp["info"])
                response_server.biiout(response)
            return tmp["return"]
        else:
            return res.content

    @staticmethod
    def deserialize_return(res, deserializer=None, response=None):
        '''Returns data deserialized and biiresponse object or
        raises an exception with biiresponse info'''
        exc_kls = getExceptionFromHttpError(res.status_code)
        logger.debug("Exception to throw for this return: %s" % str(exc_kls))
        logger.debug("Content Type: %s" % str(res.headers.get('content-type', "")))
        #logger.debug("Response: %s" % str(res.content))
        data = BiiRestApiClient.decode_return_content(res, response)
        if exc_kls is None:
            if deserializer is not None:
                try:
                    return deserializer.deserialize(data)
                except KeyError:  # TODO: Check if better capture any exception
                    raise BiiServiceException("Error handling server response")
            else:
                return data
        else:
            if 'content-type' in res.headers and "text/html" in res.headers['content-type']:
                logger.debug("Can't process html as output")
            raise exc_kls(data)
|
|
"""Constants for 1-Wire integration."""
from pi1wire import InvalidCRCException, UnsupportResponseException
from pyownet.protocol import Error as ProtocolError
from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.components.onewire.const import (
DOMAIN,
MANUFACTURER_EDS,
MANUFACTURER_HOBBYBOARDS,
MANUFACTURER_MAXIM,
Platform,
)
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_STATE,
ATTR_UNIT_OF_MEASUREMENT,
ATTR_VIA_DEVICE,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_CBAR,
PRESSURE_MBAR,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import EntityCategory
# Keys used in the mock device fixtures below to describe the expected
# entities and device-registry entries.
ATTR_DEFAULT_DISABLED = "default_disabled"
ATTR_DEVICE_FILE = "device_file"
ATTR_DEVICE_INFO = "device_info"
ATTR_ENTITY_CATEGORY = "entity_category"
ATTR_INJECT_READS = "inject_reads"
ATTR_UNIQUE_ID = "unique_id"
ATTR_UNKNOWN_DEVICE = "unknown_device"

# Entity attributes that are fixed per entity description (not per state).
FIXED_ATTRIBUTES = (
    ATTR_DEVICE_CLASS,
    ATTR_STATE_CLASS,
    ATTR_UNIT_OF_MEASUREMENT,
)
MOCK_OWPROXY_DEVICES = {
"00.111111111111": {
ATTR_INJECT_READS: [
b"", # read device type
],
ATTR_UNKNOWN_DEVICE: True,
},
"05.111111111111": {
ATTR_INJECT_READS: [
b"DS2405", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "05.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2405",
ATTR_NAME: "05.111111111111",
},
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.05_111111111111_pio",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/05.111111111111/PIO",
},
],
},
"10.111111111111": {
ATTR_INJECT_READS: [
b"DS18S20", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "10.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS18S20",
ATTR_NAME: "10.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.my_ds18b20_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/10.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"12.111111111111": {
ATTR_INJECT_READS: [
b"DS2406", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "12.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2406",
ATTR_NAME: "12.111111111111",
},
Platform.BINARY_SENSOR: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.12_111111111111_sensed_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/12.111111111111/sensed.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.12_111111111111_sensed_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/12.111111111111/sensed.B",
},
],
Platform.SENSOR: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.12_111111111111_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/12.111111111111/TAI8570/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.PRESSURE,
ATTR_ENTITY_ID: "sensor.12_111111111111_pressure",
ATTR_INJECT_READS: b" 1025.123",
ATTR_STATE: "1025.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/12.111111111111/TAI8570/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
],
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_pio_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/12.111111111111/PIO.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_pio_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/12.111111111111/PIO.B",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_latch_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/12.111111111111/latch.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.12_111111111111_latch_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/12.111111111111/latch.B",
},
],
},
"1D.111111111111": {
ATTR_INJECT_READS: [
b"DS2423", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "1D.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2423",
ATTR_NAME: "1D.111111111111",
},
Platform.SENSOR: [
{
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_a",
ATTR_INJECT_READS: b" 251123",
ATTR_STATE: "251123",
ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.A",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
{
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_b",
ATTR_INJECT_READS: b" 248125",
ATTR_STATE: "248125",
ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.B",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
],
},
"1F.111111111111": {
ATTR_INJECT_READS: [
b"DS2409", # read device type
],
ATTR_DEVICE_INFO: [
{
ATTR_IDENTIFIERS: {(DOMAIN, "1F.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2409",
ATTR_NAME: "1F.111111111111",
},
{
ATTR_IDENTIFIERS: {(DOMAIN, "1D.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2423",
ATTR_NAME: "1D.111111111111",
ATTR_VIA_DEVICE: (DOMAIN, "1F.111111111111"),
},
],
"branches": {
"aux": {},
"main": {
"1D.111111111111": {
ATTR_INJECT_READS: [
b"DS2423", # read device type
],
Platform.SENSOR: [
{
ATTR_DEVICE_FILE: "/1F.111111111111/main/1D.111111111111/counter.A",
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_a",
ATTR_INJECT_READS: b" 251123",
ATTR_STATE: "251123",
ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.A",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
{
ATTR_DEVICE_FILE: "/1F.111111111111/main/1D.111111111111/counter.B",
ATTR_ENTITY_ID: "sensor.1d_111111111111_counter_b",
ATTR_INJECT_READS: b" 248125",
ATTR_STATE: "248125",
ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
ATTR_UNIQUE_ID: "/1D.111111111111/counter.B",
ATTR_UNIT_OF_MEASUREMENT: "count",
},
],
},
},
},
},
"22.111111111111": {
ATTR_INJECT_READS: [
b"DS1822", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "22.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS1822",
ATTR_NAME: "22.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.22_111111111111_temperature",
ATTR_INJECT_READS: ProtocolError,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/22.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"26.111111111111": {
ATTR_INJECT_READS: [
b"DS2438", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "26.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2438",
ATTR_NAME: "26.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.26_111111111111_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity",
ATTR_INJECT_READS: b" 72.7563",
ATTR_STATE: "72.8",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_hih3600",
ATTR_INJECT_READS: b" 73.7563",
ATTR_STATE: "73.8",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HIH3600/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_hih4000",
ATTR_INJECT_READS: b" 74.7563",
ATTR_STATE: "74.8",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HIH4000/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_hih5030",
ATTR_INJECT_READS: b" 75.7563",
ATTR_STATE: "75.8",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HIH5030/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.26_111111111111_humidity_htm1735",
ATTR_INJECT_READS: ProtocolError,
ATTR_STATE: STATE_UNKNOWN,
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/HTM1735/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.PRESSURE,
ATTR_ENTITY_ID: "sensor.26_111111111111_pressure",
ATTR_INJECT_READS: b" 969.265",
ATTR_STATE: "969.3",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/B1-R1-A/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
ATTR_ENTITY_ID: "sensor.26_111111111111_illuminance",
ATTR_INJECT_READS: b" 65.8839",
ATTR_STATE: "65.9",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/S3-R1-A/illuminance",
ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.VOLTAGE,
ATTR_ENTITY_ID: "sensor.26_111111111111_voltage_vad",
ATTR_INJECT_READS: b" 2.97",
ATTR_STATE: "3.0",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/VAD",
ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.VOLTAGE,
ATTR_ENTITY_ID: "sensor.26_111111111111_voltage_vdd",
ATTR_INJECT_READS: b" 4.74",
ATTR_STATE: "4.7",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/VDD",
ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT,
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: SensorDeviceClass.VOLTAGE,
ATTR_ENTITY_ID: "sensor.26_111111111111_vis",
ATTR_INJECT_READS: b" 0.12",
ATTR_STATE: "0.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/26.111111111111/vis",
ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT,
},
],
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.26_111111111111_iad",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/26.111111111111/IAD",
},
],
},
"28.111111111111": {
ATTR_INJECT_READS: [
b"DS18B20", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "28.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS18B20",
ATTR_NAME: "28.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.28_111111111111_temperature",
ATTR_INJECT_READS: b" 26.984",
ATTR_STATE: "27.0",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/28.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"29.111111111111": {
ATTR_INJECT_READS: [
b"DS2408", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "29.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2408",
ATTR_NAME: "29.111111111111",
},
Platform.BINARY_SENSOR: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_0",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_1",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_2",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_3",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_4",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.4",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_5",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.5",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_6",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.6",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.29_111111111111_sensed_7",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/sensed.7",
},
],
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_0",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_1",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_2",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_3",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_4",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.4",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_5",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.5",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_6",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.6",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_pio_7",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/PIO.7",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_0",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_1",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_2",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_3",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_4",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.4",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_5",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.5",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_6",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/29.111111111111/latch.6",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.29_111111111111_latch_7",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/29.111111111111/latch.7",
},
],
},
"3A.111111111111": {
ATTR_INJECT_READS: [
b"DS2413", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "3A.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS2413",
ATTR_NAME: "3A.111111111111",
},
Platform.BINARY_SENSOR: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.3a_111111111111_sensed_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/3A.111111111111/sensed.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "binary_sensor.3a_111111111111_sensed_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/3A.111111111111/sensed.B",
},
],
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.3a_111111111111_pio_a",
ATTR_INJECT_READS: b" 1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/3A.111111111111/PIO.A",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_ID: "switch.3a_111111111111_pio_b",
ATTR_INJECT_READS: b" 0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/3A.111111111111/PIO.B",
},
],
},
"3B.111111111111": {
ATTR_INJECT_READS: [
b"DS1825", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "3B.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS1825",
ATTR_NAME: "3B.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.3b_111111111111_temperature",
ATTR_INJECT_READS: b" 28.243",
ATTR_STATE: "28.2",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/3B.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"42.111111111111": {
ATTR_INJECT_READS: [
b"DS28EA00", # read device type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "42.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
ATTR_MODEL: "DS28EA00",
ATTR_NAME: "42.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.42_111111111111_temperature",
ATTR_INJECT_READS: b" 29.123",
ATTR_STATE: "29.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/42.111111111111/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"EF.111111111111": {
ATTR_INJECT_READS: [
b"HobbyBoards_EF", # read type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "EF.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_HOBBYBOARDS,
ATTR_MODEL: "HobbyBoards_EF",
ATTR_NAME: "EF.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111111_humidity",
ATTR_INJECT_READS: b" 67.745",
ATTR_STATE: "67.7",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111111/humidity/humidity_corrected",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111111_humidity_raw",
ATTR_INJECT_READS: b" 65.541",
ATTR_STATE: "65.5",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111111/humidity/humidity_raw",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.ef_111111111111_temperature",
ATTR_INJECT_READS: b" 25.123",
ATTR_STATE: "25.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111111/humidity/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
],
},
"EF.111111111112": {
ATTR_INJECT_READS: [
b"HB_MOISTURE_METER", # read type
b" 1", # read is_leaf_0
b" 1", # read is_leaf_1
b" 0", # read is_leaf_2
b" 0", # read is_leaf_3
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "EF.111111111112")},
ATTR_MANUFACTURER: MANUFACTURER_HOBBYBOARDS,
ATTR_MODEL: "HB_MOISTURE_METER",
ATTR_NAME: "EF.111111111112",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111112_wetness_0",
ATTR_INJECT_READS: b" 41.745",
ATTR_STATE: "41.7",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.0",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.ef_111111111112_wetness_1",
ATTR_INJECT_READS: b" 42.541",
ATTR_STATE: "42.5",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.1",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.PRESSURE,
ATTR_ENTITY_ID: "sensor.ef_111111111112_moisture_2",
ATTR_INJECT_READS: b" 43.123",
ATTR_STATE: "43.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.2",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_CBAR,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.PRESSURE,
ATTR_ENTITY_ID: "sensor.ef_111111111112_moisture_3",
ATTR_INJECT_READS: b" 44.123",
ATTR_STATE: "44.1",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/sensor.3",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_CBAR,
},
],
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_leaf_sensor_0_enable",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_leaf.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_leaf_sensor_1_enable",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_leaf.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_leaf_sensor_2_enable",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_leaf.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_leaf_sensor_3_enable",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_leaf.3",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_moisture_sensor_0_enable",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_moisture.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_moisture_sensor_1_enable",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_moisture.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_moisture_sensor_2_enable",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_moisture.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111112_moisture_sensor_3_enable",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111112/moisture/is_moisture.3",
},
],
},
"EF.111111111113": {
ATTR_INJECT_READS: [
b"HB_HUB", # read type
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "EF.111111111113")},
ATTR_MANUFACTURER: MANUFACTURER_HOBBYBOARDS,
ATTR_MODEL: "HB_HUB",
ATTR_NAME: "EF.111111111113",
},
Platform.BINARY_SENSOR: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
ATTR_ENTITY_CATEGORY: EntityCategory.DIAGNOSTIC,
ATTR_ENTITY_ID: "binary_sensor.ef_111111111113_hub_short_on_branch_0",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/short.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
ATTR_ENTITY_CATEGORY: EntityCategory.DIAGNOSTIC,
ATTR_ENTITY_ID: "binary_sensor.ef_111111111113_hub_short_on_branch_1",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/short.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
ATTR_ENTITY_CATEGORY: EntityCategory.DIAGNOSTIC,
ATTR_ENTITY_ID: "binary_sensor.ef_111111111113_hub_short_on_branch_2",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/short.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_DEVICE_CLASS: BinarySensorDeviceClass.PROBLEM,
ATTR_ENTITY_CATEGORY: EntityCategory.DIAGNOSTIC,
ATTR_ENTITY_ID: "binary_sensor.ef_111111111113_hub_short_on_branch_3",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/short.3",
},
],
Platform.SWITCH: [
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111113_hub_branch_0_enable",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/branch.0",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111113_hub_branch_1_enable",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/branch.1",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111113_hub_branch_2_enable",
ATTR_INJECT_READS: b"1",
ATTR_STATE: STATE_ON,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/branch.2",
},
{
ATTR_DEFAULT_DISABLED: True,
ATTR_ENTITY_CATEGORY: EntityCategory.CONFIG,
ATTR_ENTITY_ID: "switch.ef_111111111113_hub_branch_3_enable",
ATTR_INJECT_READS: b"0",
ATTR_STATE: STATE_OFF,
ATTR_UNIQUE_ID: "/EF.111111111113/hub/branch.3",
},
],
},
"7E.111111111111": {
ATTR_INJECT_READS: [
b"EDS", # read type
b"EDS0068", # read device_type - note EDS specific
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "7E.111111111111")},
ATTR_MANUFACTURER: MANUFACTURER_EDS,
ATTR_MODEL: "EDS0068",
ATTR_NAME: "7E.111111111111",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.7e_111111111111_temperature",
ATTR_INJECT_READS: b" 13.9375",
ATTR_STATE: "13.9",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.PRESSURE,
ATTR_ENTITY_ID: "sensor.7e_111111111111_pressure",
ATTR_INJECT_READS: b" 1012.21",
ATTR_STATE: "1012.2",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
ATTR_ENTITY_ID: "sensor.7e_111111111111_illuminance",
ATTR_INJECT_READS: b" 65.8839",
ATTR_STATE: "65.9",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/light",
ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_ENTITY_ID: "sensor.7e_111111111111_humidity",
ATTR_INJECT_READS: b" 41.375",
ATTR_STATE: "41.4",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.111111111111/EDS0068/humidity",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
],
},
"7E.222222222222": {
ATTR_INJECT_READS: [
b"EDS", # read type
b"EDS0066", # read device_type - note EDS specific
],
ATTR_DEVICE_INFO: {
ATTR_IDENTIFIERS: {(DOMAIN, "7E.222222222222")},
ATTR_MANUFACTURER: MANUFACTURER_EDS,
ATTR_MODEL: "EDS0066",
ATTR_NAME: "7E.222222222222",
},
Platform.SENSOR: [
{
ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ATTR_ENTITY_ID: "sensor.7e_222222222222_temperature",
ATTR_INJECT_READS: b" 13.9375",
ATTR_STATE: "13.9",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.222222222222/EDS0066/temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
},
{
ATTR_DEVICE_CLASS: SensorDeviceClass.PRESSURE,
ATTR_ENTITY_ID: "sensor.7e_222222222222_pressure",
ATTR_INJECT_READS: b" 1012.21",
ATTR_STATE: "1012.2",
ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
ATTR_UNIQUE_ID: "/7E.222222222222/EDS0066/pressure",
ATTR_UNIT_OF_MEASUREMENT: PRESSURE_MBAR,
},
],
},
}
# Expected-state fixture for /sys/bus/w1 (sysbus) 1-Wire devices: maps each
# device id to its expected device-registry entry (ATTR_DEVICE_INFO) and the
# entity attributes expected per platform.  ATTR_INJECT_READS is what the
# mocked read produces: a value to return, an exception class to raise, or a
# list of successive outcomes (one per read attempt).
MOCK_SYSBUS_DEVICES = {
    "00-111111111111": {
        # Family 00 is flagged as unknown: the test expects no entities.
        ATTR_UNKNOWN_DEVICE: True,
    },
    "10-111111111111": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "10-111111111111")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "10",
            ATTR_NAME: "10-111111111111",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.my_ds18b20_temperature",
                ATTR_INJECT_READS: 25.123,
                ATTR_STATE: "25.1",
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/10-111111111111/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
    "22-111111111111": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "22-111111111111")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "22",
            ATTR_NAME: "22-111111111111",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.22_111111111111_temperature",
                # Read raises FileNotFoundError -> entity state is unknown.
                ATTR_INJECT_READS: FileNotFoundError,
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/22-111111111111/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
    "28-111111111111": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "28-111111111111")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "28",
            ATTR_NAME: "28-111111111111",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.28_111111111111_temperature",
                # Read raises InvalidCRCException -> entity state is unknown.
                ATTR_INJECT_READS: InvalidCRCException,
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/28-111111111111/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
    "3B-111111111111": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "3B-111111111111")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "3B",
            ATTR_NAME: "3B-111111111111",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.3b_111111111111_temperature",
                ATTR_INJECT_READS: 29.993,
                ATTR_STATE: "30.0",
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/3B-111111111111/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
    "42-111111111111": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "42-111111111111")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "42",
            ATTR_NAME: "42-111111111111",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.42_111111111111_temperature",
                # Read raises UnsupportResponseException -> state unknown.
                ATTR_INJECT_READS: UnsupportResponseException,
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/42-111111111111/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
    "42-111111111112": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "42-111111111112")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "42",
            ATTR_NAME: "42-111111111112",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.42_111111111112_temperature",
                # Nine failed reads followed by a success -> the final value
                # is reported (retry behavior presumably allows 10 attempts;
                # compare with 42-111111111113 below).
                ATTR_INJECT_READS: [UnsupportResponseException] * 9 + [27.993],
                ATTR_STATE: "28.0",
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/42-111111111112/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
    "42-111111111113": {
        ATTR_DEVICE_INFO: {
            ATTR_IDENTIFIERS: {(DOMAIN, "42-111111111113")},
            ATTR_MANUFACTURER: MANUFACTURER_MAXIM,
            ATTR_MODEL: "42",
            ATTR_NAME: "42-111111111113",
        },
        Platform.SENSOR: [
            {
                ATTR_DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.42_111111111113_temperature",
                # Ten failed reads: the success on attempt 11 never happens,
                # so the state stays unknown.
                ATTR_INJECT_READS: [UnsupportResponseException] * 10 + [27.993],
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT,
                ATTR_UNIQUE_ID: "/sys/bus/w1/devices/42-111111111113/w1_slave",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
        ],
    },
}
|
|
import re
import os, sys
from __exceptions__ import formattedException
# Coarse platform-detection flags; mutually exclusive by construction
# (each later flag excludes the earlier ones), so at most one is True.
isUsingWindows = ('win' in sys.platform.lower()) and (os.name.lower() == 'nt')
isUsingMacOSX = ('darwin' in sys.platform.lower()) and ('posix' in os.name) and (not isUsingWindows)
isUsingLinux = ('linux' in sys.platform.lower()) and ('posix' in os.name) and (not isUsingWindows) and (not isUsingMacOSX)
def isString(s):
    """Return True when s is a text value (str, or unicode on Python 2)."""
    if isinstance(s, str):
        return True
    # NOTE: 'unicode' only exists on Python 2; on Python 3 a non-str input
    # reaching this line raises NameError (same as the original or-chain).
    return isinstance(s, unicode)
def isStringValid(s):
    # Truthy only for non-empty strings.  The and-chain short-circuits, so a
    # falsy input (None, '', 0, ...) is returned unchanged rather than False.
    return (s) and isString(s) and (len(s) > 0)
def isBoolean(s):
    """Return True only for actual bool instances (not 0/1 ints)."""
    return isinstance(s, bool)
def isBooleanString(s):
    """Interpret s as a boolean literal.

    Any value whose str() form case-insensitively equals 'true' returns
    True; everything else (including 'false' and unrecognized text)
    returns False.
    """
    normalized = str(s).lower().capitalize()
    if isinstance(normalized, str) and normalized in ('True', 'False'):
        return normalized == 'True'
    return False
def isInteger(s):
    """Return True for int values (note: bool is a subclass of int)."""
    return isinstance(s, int)
def isFloat(s):
    """Return True only for float instances (ints are not coerced)."""
    return isinstance(s, float)
def isDate(s):
    """Return True when s is a datetime.date (datetime.datetime included,
    since it subclasses date)."""
    import datetime
    return isinstance(s, datetime.date)
def isSimpleDict(s):
    """Return True only for real dict instances (no duck typing)."""
    return isinstance(s, dict)
def isDict(s):
    """Duck-typed dict check: real dicts, or any object exposing a
    callable has_key attribute (e.g. SmartObject below)."""
    if isinstance(s, dict):
        return True
    try:
        return callable(s.has_key)
    except:
        # Attribute missing (or access raised): not dict-like.
        return False
def isList(obj):
    """Duck-typed list check: anything exposing a callable append method."""
    try:
        return callable(obj.append)
    except:
        # No append attribute (or access raised): not list-like.
        return False
def isTuple(obj):
    """Return True when obj is a tuple.

    The previous probe-by-mutation implementation was broken: it did
    ``obj += (1)``, where ``(1)`` is the int 1 rather than a one-element
    tuple, so empty lists (and other values where the probe raised, such
    as non-empty strings) were misreported as tuples.  It also mutated
    empty mutable sequences it probed.  A direct isinstance check is
    correct and side-effect free.
    """
    return isinstance(obj, tuple)
def isIterable(obj):
    """Return True when obj defines __iter__.

    hasattr() never raises TypeError, so the original try/except wrapper
    was dead code and has been removed.  Note: on Python 2, str has no
    __iter__ even though it is iterable via __getitem__.
    """
    return hasattr(obj, '__iter__')
def md5(plain):
    """Return the hex MD5 digest of plain.

    hashlib digests operate on bytes, so text input is UTF-8 encoded
    first.  On Python 2 ``str`` is ``bytes``, so existing byte-string
    callers pass through unchanged and keep their exact behavior; the
    encode only kicks in for unicode / Python 3 str input, which
    previously raised TypeError.

    :param plain: bytes or text to hash.
    :returns: 32-character lowercase hex digest string.
    """
    import hashlib
    if not isinstance(plain, bytes):
        plain = plain.encode('utf-8')
    m = hashlib.md5()
    m.update(plain)
    return m.hexdigest()
def handle_services(cfgname,payload,logger):
    """Write host/command/service definition blocks to cfgname.

    Payload keys prefixed hostN_/commandN_/serviceN_ are grouped into one
    SmartObject per object and emitted as "define <type>{ ... }" blocks
    (this looks like Nagios object-configuration syntax -- TODO confirm).
    If payload.partition_names is a list, one extra passive-check service
    is synthesized per partition name, attached to the first host and
    first command found.  Returns __status__, which is never assigned
    after initialization, i.e. always '' (NOTE(review): possibly an
    oversight -- compare handle_disk_services below).
    NOTE: Python 2 only (iteritems and comma-except syntax throughout).
    """
    # \A anchors each match at the very start of the payload key.
    __re2__ = re.compile("\Ahost[0-9]*_")
    __re3__ = re.compile("\Aservice[0-9]*_")
    __re4__ = re.compile("\Acommand[0-9]*_")
    __services__ = []
    __status__ = ''
    if (cfgname):
        logger.debug('DEBUG.1: payload.keys()=%s' % (payload.keys()))
        # Group "hostN_<field>" keys into one SmartObject per hostN.
        items = [k for k in payload.keys() if (__re2__.search(k))]
        logger.debug('DEBUG.1: items=%s' % (items))
        hosts = SmartObject()
        for item in items:
            toks = item.split('_')
            if (hosts[toks[0]] is None):
                hosts[toks[0]] = SmartObject()
            hosts[toks[0]]['_'.join(toks[1:])] = payload[item]
        logger.debug('DEBUG.2: hosts=%s' % (hosts.__dict__))
        # Same grouping for "commandN_<field>" keys.
        items = [k for k in payload.keys() if (__re4__.search(k))]
        logger.debug('DEBUG.4: items=%s' % (items))
        commands = SmartObject()
        for item in items:
            toks = item.split('_')
            if (commands[toks[0]] is None):
                commands[toks[0]] = SmartObject()
            commands[toks[0]]['_'.join(toks[1:])] = payload[item]
        logger.debug('DEBUG.5: commands=%s' % (commands.__dict__))
        # And for "serviceN_<field>" keys; count tracks distinct services.
        items = [k for k in payload.keys() if (__re3__.search(k))]
        logger.debug('DEBUG.3: items=%s' % (items))
        services = SmartObject()
        toks = None
        count = 0
        for item in items:
            toks = item.split('_')
            if (services[toks[0]] is None):
                services[toks[0]] = SmartObject()
                count += 1
            services[toks[0]]['_'.join(toks[1:])] = payload[item]
        if (isList(payload.partition_names)):
            logger.debug('DEBUG.4.0: payload.partition_names=%s' % (payload.partition_names))
            # Synthesize one passive-check service per partition name.
            for pname in payload.partition_names:
                logger.debug('DEBUG.4.0.1: pname=%s' % (pname))
                sname = 'service%s' % (count+1)
                if (services[sname] is None):
                    services[sname] = SmartObject()
                # The first host found owns the generated service.
                first_host = None
                for k,v in hosts.__dict__.iteritems():
                    logger.debug('DEBUG.4.1: %s=%s [%s]' % (k,v,(k not in ['__dict__'])))
                    if (k not in ['__dict__']):
                        first_host = v
                        logger.debug('DEBUG.4.1.1: first_host=%s' % (first_host))
                        break
                # The first command found becomes its check_command.
                first_command = None
                for k,v in commands.__dict__.iteritems():
                    logger.debug('DEBUG.4.2: %s=%s [%s]' % (k,v,(k not in ['__dict__'])))
                    if (k not in ['__dict__']):
                        first_command = v
                        logger.debug('DEBUG.4.2.1: first_command=%s' % (first_command))
                        break
                services[sname]["use"] = "generic-service"
                services[sname]["host_name"] = first_host['host_name'] if (first_host) else 'UNKNOWN'
                services[sname]["service_description"] = pname
                # Generated services are passive-only.
                services[sname]["active_checks_enabled"] = "0"
                services[sname]["passive_checks_enabled"] = "1"
                services[sname]["check_command"] = first_command['command_name'] if (first_command) else 'dummy_command'
                count += 1
                logger.debug('DEBUG.4.3: services[%s]=%s' % (sname,services[sname]))
        logger.debug('DEBUG.4: services=%s' % (services.__dict__))
        if (len(items) > 0):
            logger.debug('DEBUG: BEGIN:')
            preamble = 'define %s{'
            fOut = open(cfgname,'w')
            try:
                def emit_object_using(bucket,name,pre):
                    # Pad every key to the longest key length + 10 so the
                    # emitted values line up in a column.
                    longest = -1
                    for k,v in bucket.__dict__.iteritems():
                        logger.debug('DEBUG (longest.1): %s=%s' % (k,v))
                        if (k not in ['__dict__']):
                            try:
                                for kk,vv in v.__dict__.iteritems():
                                    if (kk not in ['__dict__']):
                                        longest = max(longest,len(kk))
                                        logger.debug('DEBUG (longest.2): longest=%s' % (longest))
                            except:
                                pass
                    longest += 10
                    logger.debug('DEBUG (longest.3): longest=%s' % (longest))
                    # Emit one "define <name>{ ... }" block per object.
                    for k,v in iter(sorted(bucket.__dict__.iteritems())):
                        logger.debug('DEBUG.1: %s=%s' % (k,v))
                        if (k not in ['__dict__']):
                            try:
                                logger.debug('DEBUG.2: %s=%s' % (k,v))
                                logger.debug('DEBUG.2.1: %s, %s' % (pre,name))
                                fOut.write('%s\n' % (pre % (name)))
                                for kk,vv in v.__dict__.iteritems():
                                    logger.debug('DEBUG.3: (%s)' % ((kk not in ['__dict__'])))
                                    if (kk not in ['__dict__']):
                                        fOut.write('\t%s%s%s\n' % (kk,' '*(longest-len(kk)),vv))
                                fOut.write('}\n\n')
                            except Exception, ex:
                                logger.exception('EXCEPTION: %s' % (formattedException(details=ex)))
                emit_object_using(hosts, 'host', preamble)
                emit_object_using(commands, 'command', preamble)
                emit_object_using(services, 'service', preamble)
            except Exception, ex:
                logger.exception('EXCEPTION: %s' % (formattedException(details=ex)))
            logger.debug('DEBUG: END !!!')
            logger.debug('DEBUG: fOut=%s' % (fOut.name))
            fOut.flush()
            fOut.close()
        else:
            logger.error('ERROR: Cannot handle_services unless services have been defined in payload.')
    else:
        logger.error('ERROR: Cannot handle_services with cfgname of "%s".' % (cfgname))
    return __status__
def handle_disk_services(cfgname,payload,logger):
    """Replace existing DISK service blocks in cfgname with payload services.

    Scans cfgname for "define service{...}" blocks whose body matches
    "service_description\\s*DISK"; matched blocks are collected and the
    file is rewritten to cfgname+'.new' with replacement services built
    from the serviceN_* keys in payload.  Returns a human-readable status
    string describing how many blocks were found/replaced.
    NOTE: Python 2 only (iteritems, xrange, comma-except syntax).
    """
    __re1__ = re.compile(r"define\s*service.*\{")
    __re2__ = re.compile(r"service_description\s*DISK")
    __re3__ = re.compile("service[0-9]*_")
    __services__ = []
    __status__ = ''
    if (cfgname) and (os.path.exists(cfgname)):
        logger.debug('DEBUG.1: payload.keys()=%s' % (payload.keys()))
        # Group "serviceN_<field>" payload keys into SmartObjects.
        items = [k for k in payload.keys() if (__re3__.search(k))]
        logger.debug('DEBUG.1: items=%s' % (items))
        services = SmartObject()
        for item in items:
            toks = item.split('_')
            if (services[toks[0]] is None):
                services[toks[0]] = SmartObject()
            services[toks[0]]['_'.join(toks[1:])] = payload[item]
        logger.debug('DEBUG.1a: services=%s' % (services.__dict__))
        if (len(items) > 0):
            __matches__ = False
            def collecting(aLine,m,lineNum):
                # Accumulate lines of the current service block; a closing
                # brace ends the block (returns False to stop collecting).
                __service__.append(SmartObject(args={'linenum':lineNum,'content':aLine}))
                logger.debug('DEBUG.2: collecting=%s' % (len(__service__)))
                if (aLine.find('}') > -1):
                    m = False
                    logger.debug('DEBUG.3: __matches__=%s' % (__matches__))
                return m
            fIn = open(cfgname)
            try:
                lines = fIn.readlines()
                __service__ = []
                matches2 = None
                line_num = 0
                for l in lines:
                    line_num += 1
                    logger.debug('DEBUG.4: l=%s' % (l))
                    if (__matches__):
                        # Inside a service block: watch for the DISK marker.
                        logger.debug('DEBUG.5: matches2=%s' % (matches2))
                        if (matches2 is None):
                            matches2 = __re2__.search(l)
                            logger.debug('DEBUG.6: matches2=%s' % (matches2))
                        __matches__ = collecting(l,__matches__,line_num)
                    else:
                        matches1 = __re1__.search(l)
                        logger.debug('DEBUG.7: matches1=%s' % (matches1))
                        if (matches1):
                            logger.debug('DEBUG.8: matches2=%s' % (matches2))
                            # The previous block matched DISK: keep it.
                            if (matches2 is not None):
                                __services__.append(__service__)
                                matches2 = None
                            __service__ = []
                            __matches__ = True
                            logger.debug('DEBUG.9: __matches__=%s' % (__matches__))
                            __matches__ = collecting(l,__matches__,line_num)
            except Exception, ex:
                logger.exception('EXCEPTION: %s' % (formattedException(details=ex)))
            fIn.close()
            __status__ = 'Found %s service%s in %s to be replaced by %s service%s from payload.' % (len(__services__),'s' if (len(__services__) > 1) else '',cfgname,len(services.__dict__),'s' if (len(services.__dict__) > 1) else '')
            logger.debug('DEBUG: %s' % (__status__))
            if (len(__services__) > 0):
                cfgname_new = cfgname+'.new'
                for svc in __services__:
                    l_begin = svc[0].linenum
                    l_end = svc[-1].linenum
                    preamble = ''
                    assert l_begin < l_end, 'ERROR.1: Check your logic, sir.'
                    logger.debug('DEBUG: BEGIN:')
                    fIn = open(cfgname)
                    # NOTE(review): reopening cfgname_new with 'w' per
                    # iteration discards earlier iterations' output -- only
                    # the last collected block's rewrite survives; confirm
                    # this is intended.
                    fOut = open(cfgname_new,'w')
                    # Copy everything before the matched block verbatim; the
                    # guard is always true inside xrange(0, l_begin).
                    for i in xrange(0,l_begin):
                        l = fIn.readline()
                        if (i < l_begin):
                            preamble = l
                            fOut.write(l)
                    try:
                        if (0):
                            # Disabled alternative: skip the original lines.
                            for i in xrange(l_begin,l_end):
                                l = fIn.readline()
                                logger.debug('DEBUG: SKIPPING: %s' % (l))
                        # Re-emit the collected block content as-is.
                        for item in svc:
                            #assert l == item.content, 'ERROR.2: Check your logic, please. Expected (%s) got (%s).' % (item.content,l)
                            logger.debug('DEBUG: %s' % (item.__dict__))
                            #l = fIn.readline() # toss this line away
                            fOut.write(item.content)
                        if (1):
                            # Pad keys to the longest key + 10 for alignment.
                            longest = -1
                            for k,v in services.__dict__.iteritems():
                                logger.debug('DEBUG (longest.1): %s=%s' % (k,v))
                                if (k not in ['__dict__']):
                                    try:
                                        for kk,vv in v.__dict__.iteritems():
                                            if (kk not in ['__dict__']):
                                                longest = max(longest,len(kk))
                                                logger.debug('DEBUG (longest.2): longest=%s' % (longest))
                                    except:
                                        pass
                            longest += 10
                            logger.debug('DEBUG (longest.3): longest=%s' % (longest))
                            # Append the replacement services from payload.
                            for k,v in iter(sorted(services.__dict__.iteritems())):
                                logger.debug('DEBUG: %s=%s' % (k,v))
                                l = fIn.readline() # toss this line away
                                if (k not in ['__dict__']):
                                    try:
                                        for kk,vv in v.__dict__.iteritems():
                                            if (kk not in ['__dict__']):
                                                fOut.write('\t%s%s%s\n' % (kk,' '*(longest-len(kk)),vv))
                                        fOut.write('}\n\n')
                                        fOut.write('%s' % (preamble))
                                    except:
                                        pass
                        # Copy the remainder of the input file.
                        while (1):
                            l = fIn.readline()
                            if (not l):
                                break
                            fOut.write(l)
                    except Exception, ex:
                        logger.exception('EXCEPTION: %s' % (formattedException(details=ex)))
                    logger.debug('DEBUG: END !!!')
                    logger.debug('DEBUG: fIn=%s, fOut=%s' % (fIn.name,fOut.name))
                    fIn.close()
                    fOut.flush()
                    fOut.close()
        else:
            logger.error('ERROR: Cannot handle_services unless services have been defined in payload.')
    else:
        logger.error('ERROR: Cannot handle_services with cfgname of "%s".' % (cfgname))
    return __status__
def typeClassName(obj):
    """Return the last dotted component of obj's class name as a string.

    e.g. 5 -> 'int', 'x' -> 'str'.  Works off str(obj.__class__), falling
    back to str(obj) when there is no __class__ attribute.
    """
    try:
        sObj = str(obj.__class__)
    except AttributeError:
        sObj = str(obj)
    except:
        # NOTE(review): typeName() is not defined anywhere in this file, so
        # this fallback would raise NameError if ever reached; kept only to
        # preserve the original control flow.
        return typeName(obj)
    toks = sObj.replace('<', '').replace('>', '').replace("'", '').replace('object at', 'object_at').split()
    # The original body always returned toks[-1]: it contained a no-op
    # if/pass on 'object_at' plus an unreachable "return toks[0]" after the
    # unconditional return.  That dead code is removed; behavior unchanged.
    return toks[-1]
def walk(top, topdown=True, onerror=None, rejecting_re=None):
    """os.walk work-alike with an optional name-rejection filter.

    Names (files and directories) matching rejecting_re -- a compiled
    regex -- are skipped entirely.  Yields (dirpath, dirnames, filenames)
    tuples, top-down by default, and never descends into symlinked
    directories.  NOTE: Python 2 only ("except os.error, err" syntax; the
    '_sre.SRE_Pattern' class-name check predates Python 3.7's re.Pattern).
    """
    # Only treat rejecting_re as a filter when it really is a compiled
    # regex object; any other value is ignored.
    isRejectingRe = typeClassName(rejecting_re) == '_sre.SRE_Pattern'
    try:
        names = [n for n in os.listdir(top) if (not isRejectingRe) or (isRejectingRe and not rejecting_re.search(n))]
    except os.error, err:
        # Mirror os.walk: surface listdir errors via onerror, then stop.
        if onerror is not None:
            onerror(err)
        return
    dirs, nondirs = [], []
    for name in names:
        if os.path.isdir(os.path.join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)
    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        path = os.path.join(top, name)
        # Recurse, but do not follow symlinks (same default as os.walk).
        if not os.path.islink(path):
            for x in walk(path, topdown, onerror, rejecting_re):
                yield x
    if not topdown:
        yield top, dirs, nondirs
def shellexecute(cmd):
    """Run cmd via os.popen3 and return stdout+stderr lines.

    Only executes on Linux (gated on isUsingLinux); returns None on other
    platforms.  On failure returns the formatted exception text instead of
    output lines.  NOTE: Python 2 only (os.popen3, comma-except syntax);
    cmd is passed to the shell, so only use with trusted input.
    """
    results = None
    if (isUsingLinux):
        try:
            # popen3 returns (stdin, stdout, stderr) file objects.
            infile, outfile, errfile = os.popen3(cmd)
            stdout_lines = outfile.readlines()
            stderr_lines = errfile.readlines()
            results = stdout_lines + stderr_lines
        except Exception, ex:
            results = formattedException(details=ex)
    return results
class SmartObject(object):
    """Attribute bag built from a dict; missing attributes read as None.

    One level of nested dicts is flattened into '<outer>_<inner>' keys.
    Setting an attribute (or item) to None deletes it.  Supports both
    attribute and item access.  NOTE: Python 2 only (iteritems/has_key).
    """
    def __init__(self,args={}):
        '''Populate from a dict object.'''
        # NOTE(review): the mutable default is safe only because args is
        # never mutated here -- fromDict just reads it.
        self.__dict__ = {}
        self.fromDict(args)
    def fromDict(self, args):
        # Accept any mapping exposing iteritems(); anything else is treated
        # as empty rather than raising.
        try:
            __iter__ = args.iteritems()
        except:
            __iter__ = []
        for ak,av in __iter__:
            try:
                # Flatten one nesting level into "outer_inner" keys.
                for k,v in av.iteritems():
                    self.__dict__['%s_%s' % (ak,k)] = v
            except:
                self.__dict__[ak] = av
    def __str__(self):
        # Strip control and non-ASCII characters from string values so the
        # rendered form is always printable.
        ascii_only = lambda s:''.join([ch for ch in s if (ord(ch) >= 32) and (ord(ch) <= 127)])
        _vars = []
        for k,v in self.__dict__.iteritems():
            _vars.append('%s="%s"' % (k,ascii_only(v) if (isinstance(v,str)) else str(v)))
        return '(%s) %s' % (str(self.__class__),', '.join(_vars))
    def keys(self):
        return self.__dict__.keys()
    def has_key(self,key):
        return self.__dict__.has_key(key)
    def iteritems(self):
        return [(k,v) for k,v in self.__dict__.iteritems() if (k != '__dict__')]
    def __getitem__(self, name):
        # Item access delegates to attribute access (None when missing).
        return self.__getattr__(name)
    def __setitem__(self,name,value):
        self.__setattr__(name,value)
    def __getattr__(self, name):
        # Unknown names yield None instead of raising AttributeError.
        if (self.__dict__.has_key(name)):
            return self.__dict__[name]
        else:
            return None
    def __setattr__(self, name, value):
        # Assigning None removes an existing attribute; any other value is
        # stored.  The equality test is guarded because value's __eq__ may
        # raise; that case is treated as "not None".
        __is__ = False
        try:
            __is__ = (value == None)
        except:
            pass
        if (__is__) and (self.__dict__.has_key(name)):
            del self.__dict__[name]
        else:
            self.__dict__[name] = value
if (__name__ == '__main__'):
    # Smoke test: print the digest of a sample string.  Uses print as a
    # function so the line parses on both Python 2 and Python 3 (the old
    # "print md5(...)" statement form is a SyntaxError on Python 3).
    print(md5('plaintext'))
|
|
# Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module manages the HTTP and HTTPS connections to the backend controllers.
The main class it provides for external use is ServerPool which manages a set
of ServerProxy objects that correspond to individual backend controllers.
The following functionality is handled by this module:
- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers
- Automatic failover between controllers
- SSL Certificate enforcement
- HTTP Authentication
"""
import base64
import httplib
import os
import socket
import ssl
import time
import weakref
import eventlet
import eventlet.corolocal
from oslo.config import cfg
from neutron.common import exceptions
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch.db import consistency_db as cdb
LOG = logging.getLogger(__name__)

# The following are used to invoke the API on the external controller
CAPABILITIES_PATH = "/capabilities"
NET_RESOURCE_PATH = "/tenants/%s/networks"
PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
ROUTER_RESOURCE_PATH = "/tenants/%s/routers"
ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces"
NETWORKS_PATH = "/tenants/%s/networks/%s"
FLOATINGIPS_PATH = "/tenants/%s/floatingips/%s"
PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
ROUTERS_PATH = "/tenants/%s/routers/%s"
ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s"
TOPOLOGY_PATH = "/topology"
HEALTH_PATH = "/health"
SWITCHES_PATH = "/switches/%s"
# HTTP statuses 200-206 inclusive are treated as success.
SUCCESS_CODES = range(200, 207)
FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
                 504, 505]
BASE_URI = '/networkService/v1.1'
ORCHESTRATION_SERVICE_ID = 'Neutron v2.0'
# Headers used to carry the consistency-hash value and request context
# between Neutron and the controller.
HASH_MATCH_HEADER = 'X-BSN-BVS-HASH-MATCH'
REQ_CONTEXT_HEADER = 'X-REQ-CONTEXT'
# error messages
NXNETWORK = 'NXVNS'
# Retry policy for HTTP 503 responses from the controller.
HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT = 3
HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL = 3
class RemoteRestError(exceptions.NeutronException):
    # Raised when a REST call to the backend controller fails; 'status'
    # carries the HTTP status code when one was received, else None.
    message = _("Error in REST call to remote network "
                "controller: %(reason)s")
    status = None
    def __init__(self, **kwargs):
        # Pop 'status' so the base class does not see it; 'reason' is only
        # read (not popped) because the message template interpolates it.
        self.status = kwargs.pop('status', None)
        self.reason = kwargs.get('reason')
        super(RemoteRestError, self).__init__(**kwargs)
class ServerProxy(object):
    """REST server proxy to a network controller."""
    def __init__(self, server, port, ssl, auth, neutron_id, timeout,
                 base_uri, name, mypool, combined_cert):
        self.server = server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        self.name = name
        self.success_codes = SUCCESS_CODES
        self.auth = None
        self.neutron_id = neutron_id
        self.failed = False
        self.capabilities = []
        # enable server to reference parent pool
        self.mypool = mypool
        # cache connection here to avoid a SSL handshake for every connection
        self.currentconn = None
        if auth:
            # Pre-compute the HTTP Basic Authorization header value from the
            # "user:password" auth string.
            self.auth = 'Basic ' + base64.encodestring(auth).strip()
        self.combined_cert = combined_cert
    def get_capabilities(self):
        # Ask the controller which optional API features it supports; on any
        # failure, keep (and return) the current cached list.
        try:
            body = self.rest_call('GET', CAPABILITIES_PATH)[2]
            self.capabilities = jsonutils.loads(body)
        except Exception:
            LOG.exception(_("Couldn't retrieve capabilities. "
                            "Newer API calls won't be supported."))
        LOG.info(_("The following capabilities were received "
                   "for %(server)s: %(cap)s"), {'server': self.server,
                                                'cap': self.capabilities})
        return self.capabilities
    def rest_call(self, action, resource, data='', headers=None,
                  timeout=False, reconnect=False, hash_handler=None):
        """Issue one HTTP(S) request against the controller.

        Returns a (status, reason, body-string, decoded-body) 4-tuple; a
        status of 0 signals a transport-level failure.  A cached connection
        is reused when the controller supports keep-alive; on an
        HTTPException over a cached connection the call retries once with
        reconnect=True.
        """
        uri = self.base_uri + resource
        body = jsonutils.dumps(data)
        headers = headers or {}
        headers['Content-type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['NeutronProxy-Agent'] = self.name
        headers['Instance-ID'] = self.neutron_id
        headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
        if hash_handler:
            # this will be excluded on calls that don't need hashes
            # (e.g. topology sync, capability checks)
            headers[HASH_MATCH_HEADER] = hash_handler.read_for_update()
        else:
            hash_handler = cdb.HashHandler()
        if 'keep-alive' in self.capabilities:
            headers['Connection'] = 'keep-alive'
        else:
            # Controller cannot hold connections open: always reconnect.
            reconnect = True
        if self.auth:
            headers['Authorization'] = self.auth
        LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, "
                    "ssl=%(ssl)r"),
                  {'server': self.server, 'port': self.port, 'ssl': self.ssl})
        LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, "
                    "headers=%(headers)r, action=%(action)s"),
                  {'resource': resource, 'data': data, 'headers': headers,
                   'action': action})
        # unspecified timeout is False because a timeout can be specified as
        # None to indicate no timeout.
        if timeout is False:
            timeout = self.timeout
        if timeout != self.timeout:
            # need a new connection if timeout has changed
            reconnect = True
        if not self.currentconn or reconnect:
            if self.currentconn:
                self.currentconn.close()
            if self.ssl:
                self.currentconn = HTTPSConnectionWithValidation(
                    self.server, self.port, timeout=timeout)
                if self.currentconn is None:
                    LOG.error(_('ServerProxy: Could not establish HTTPS '
                                'connection'))
                    return 0, None, None, None
                self.currentconn.combined_cert = self.combined_cert
            else:
                self.currentconn = httplib.HTTPConnection(
                    self.server, self.port, timeout=timeout)
                if self.currentconn is None:
                    LOG.error(_('ServerProxy: Could not establish HTTP '
                                'connection'))
                    return 0, None, None, None
        try:
            self.currentconn.request(action, uri, body, headers)
            response = self.currentconn.getresponse()
            respstr = response.read()
            respdata = respstr
            if response.status in self.success_codes:
                hash_value = response.getheader(HASH_MATCH_HEADER)
                # don't clear hash from DB if a hash header wasn't present
                if hash_value is not None:
                    hash_handler.put_hash(hash_value)
                try:
                    respdata = jsonutils.loads(respstr)
                except ValueError:
                    # response was not JSON, ignore the exception
                    pass
            ret = (response.status, response.reason, respstr, respdata)
        except httplib.HTTPException:
            # If we were using a cached connection, try again with a new one.
            with excutils.save_and_reraise_exception() as ctxt:
                self.currentconn.close()
                if reconnect:
                    # if reconnect is true, this was on a fresh connection so
                    # reraise since this server seems to be broken
                    ctxt.reraise = True
                else:
                    # if reconnect is false, it was a cached connection so
                    # try one more time before re-raising
                    ctxt.reraise = False
                    return self.rest_call(action, resource, data, headers,
                                          timeout=timeout, reconnect=True)
        except (socket.timeout, socket.error) as e:
            # Transport failure: report status 0 to the caller.
            self.currentconn.close()
            LOG.error(_('ServerProxy: %(action)s failure, %(e)r'),
                      {'action': action, 'e': e})
            ret = 0, None, None, None
        LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, "
                    "ret=%(ret)s, data=%(data)r"), {'status': ret[0],
                                                    'reason': ret[1],
                                                    'ret': ret[2],
                                                    'data': ret[3]})
        return ret
class ServerPool(object):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance:
return cls._instance
cls._instance = cls()
return cls._instance
def __init__(self, timeout=False,
base_uri=BASE_URI, name='NeutronRestProxy'):
LOG.debug(_("ServerPool: initializing"))
# 'servers' is the list of network controller REST end-points
# (used in order specified till one succeeds, and it is sticky
# till next failure). Use 'server_auth' to encode api-key
servers = cfg.CONF.RESTPROXY.servers
self.auth = cfg.CONF.RESTPROXY.server_auth
self.ssl = cfg.CONF.RESTPROXY.server_ssl
self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
self.base_uri = base_uri
self.name = name
self.contexts = {}
self.timeout = cfg.CONF.RESTPROXY.server_timeout
self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections
default_port = 8000
if timeout is not False:
self.timeout = timeout
# Function to use to retrieve topology for consistency syncs.
# Needs to be set by module that uses the servermanager.
self.get_topo_function = None
self.get_topo_function_args = {}
if not servers:
raise cfg.Error(_('Servers not defined. Aborting server manager.'))
servers = [s if len(s.rsplit(':', 1)) == 2
else "%s:%d" % (s, default_port)
for s in servers]
if any((len(spl) != 2 or not spl[1].isdigit())
for spl in [sp.rsplit(':', 1)
for sp in servers]):
raise cfg.Error(_('Servers must be defined as <ip>:<port>. '
'Configuration was %s') % servers)
self.servers = [
self.server_proxy_for(server, int(port))
for server, port in (s.rsplit(':', 1) for s in servers)
]
eventlet.spawn(self._consistency_watchdog,
cfg.CONF.RESTPROXY.consistency_interval)
ServerPool._instance = self
LOG.debug(_("ServerPool: initialization done"))
def set_context(self, context):
# this context needs to be local to the greenthread
# so concurrent requests don't use the wrong context.
# Use a weakref so the context is garbage collected
# after the plugin is done with it.
ref = weakref.ref(context)
self.contexts[eventlet.corolocal.get_ident()] = ref
def get_context_ref(self):
# Try to get the context cached for this thread. If one
# doesn't exist or if it's been garbage collected, this will
# just return None.
try:
return self.contexts[eventlet.corolocal.get_ident()]()
except KeyError:
return None
    def get_capabilities(self):
        """Return the set of capabilities supported by *every* server.

        The result is cached on the instance, so the computation below
        only runs on the first call (EAFP: AttributeError on cache miss).
        """
        # lookup on first try
        try:
            return self.capabilities
        except AttributeError:
            # each server should return a list of capabilities it supports
            # e.g. ['floatingip']
            capabilities = [set(server.get_capabilities())
                            for server in self.servers]
            # Pool only supports what all of the servers support
            self.capabilities = set.intersection(*capabilities)
            return self.capabilities
    def server_proxy_for(self, server, port):
        """Build a ServerProxy for *server*:*port*.

        The proxy is wired to this pool's auth/ssl/timeout settings plus
        the combined certificate bundle for the host (None when SSL
        validation is disabled).
        """
        combined_cert = self._get_combined_cert_for_server(server, port)
        return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id,
                           self.timeout, self.base_uri, self.name, mypool=self,
                           combined_cert=combined_cert)
    def _get_combined_cert_for_server(self, server, port):
        """Build (and return the path of) the combined cert bundle for a host.

        Returns None when SSL is off or validation is disabled. Otherwise
        writes '<ssl_cert_directory>/combined/<server>.pem' containing all
        CA certs plus the host-specific cert, creating directories as
        needed.

        :raises cfg.Error: when the ssl_cert_directory does not exist or no
            certificates can be found for *server*.
        """
        # The ssl library requires a combined file with all trusted certs
        # so we make one containing the trusted CAs and the corresponding
        # host cert for this server
        combined_cert = None
        if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation:
            base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory
            host_dir = os.path.join(base_ssl, 'host_certs')
            ca_dir = os.path.join(base_ssl, 'ca_certs')
            combined_dir = os.path.join(base_ssl, 'combined')
            combined_cert = os.path.join(combined_dir, '%s.pem' % server)
            if not os.path.exists(base_ssl):
                raise cfg.Error(_('ssl_cert_directory [%s] does not exist. '
                                  'Create it or disable ssl.') % base_ssl)
            # Auto-create the expected sub-directory layout.
            for automake in [combined_dir, ca_dir, host_dir]:
                if not os.path.exists(automake):
                    os.makedirs(automake)
            # get all CA certs
            certs = self._get_ca_cert_paths(ca_dir)
            # check for a host specific cert
            hcert, exists = self._get_host_cert_path(host_dir, server)
            if exists:
                certs.append(hcert)
            elif cfg.CONF.RESTPROXY.ssl_sticky:
                # "Sticky" mode: trust whatever cert the server presents on
                # first contact and store it for future validation.
                self._fetch_and_store_cert(server, port, hcert)
                certs.append(hcert)
            if not certs:
                raise cfg.Error(_('No certificates were found to verify '
                                  'controller %s') % (server))
            self._combine_certs_to_file(certs, combined_cert)
        return combined_cert
def _combine_certs_to_file(self, certs, cfile):
'''
Concatenates the contents of each certificate in a list of
certificate paths to one combined location for use with ssl
sockets.
'''
with open(cfile, 'w') as combined:
for c in certs:
with open(c, 'r') as cert_handle:
combined.write(cert_handle.read())
def _get_host_cert_path(self, host_dir, server):
'''
returns full path and boolean indicating existence
'''
hcert = os.path.join(host_dir, '%s.pem' % server)
if os.path.exists(hcert):
return hcert, True
return hcert, False
def _get_ca_cert_paths(self, ca_dir):
certs = [os.path.join(root, name)
for name in [
name for (root, dirs, files) in os.walk(ca_dir)
for name in files
]
if name.endswith('.pem')]
return certs
    def _fetch_and_store_cert(self, server, port, path):
        """Retrieve *server*'s SSL certificate and persist it to *path*.

        Used for "sticky" first-connection trust: the certificate seen on
        first contact is stored and trusted afterwards.

        :returns: the PEM-encoded certificate string.
        :raises cfg.Error: when the certificate cannot be retrieved.
        """
        try:
            cert = ssl.get_server_certificate((server, port))
        except Exception as e:
            raise cfg.Error(_('Could not retrieve initial '
                              'certificate from controller %(server)s. '
                              'Error details: %(error)s') %
                            {'server': server, 'error': str(e)})
        # NOTE(review): message reads "Storing to certificate" -- likely
        # meant "Storing certificate"; left unchanged here.
        LOG.warning(_("Storing to certificate for host %(server)s "
                      "at %(path)s") % {'server': server,
                                        'path': path})
        self._file_put_contents(path, cert)
        return cert
def _file_put_contents(self, path, contents):
# Simple method to write to file.
# Created for easy Mocking
with open(path, 'w') as handle:
handle.write(contents)
def server_failure(self, resp, ignore_codes=[]):
"""Define failure codes as required.
Note: We assume 301-303 is a failure, and try the next server in
the server pool.
"""
return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes)
    def action_success(self, resp):
        """Return True when *resp* carries a successful (2xx) status.

        :param resp: response tuple whose first element is the HTTP status.
        """
        return resp[0] in SUCCESS_CODES
    @utils.synchronized('bsn-rest-call')
    def rest_call(self, action, resource, data, headers, ignore_codes,
                  timeout=False):
        """Issue a REST call, failing over across the server pool.

        Servers that previously failed are tried last. A 503 is retried
        on the same server (with a sleep between attempts); a 409 means
        the consistency hash did not match, triggering a full topology
        resynchronization before evaluating the response.

        :returns: the response tuple of the first non-failing server, or
            the first server's response when every server fails.
        """
        context = self.get_context_ref()
        if context:
            # include the requesting context information if available
            cdict = context.to_dict()
            # remove the auth token so it's not present in debug logs on the
            # backend controller
            cdict.pop('auth_token', None)
            headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
        hash_handler = cdb.HashHandler(context=context)
        # Least-recently-failed servers first (False sorts before True).
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            # Retry loop for 503 (service unavailable) responses.
            for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
                ret = active_server.rest_call(action, resource, data, headers,
                                              timeout,
                                              reconnect=self.always_reconnect,
                                              hash_handler=hash_handler)
                if ret[0] != httplib.SERVICE_UNAVAILABLE:
                    break
                time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)
            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT:
                if not self.get_topo_function:
                    raise cfg.Error(_('Server requires synchronization, '
                                      'but no topology function was defined.'))
                # The hash was incorrect so it needs to be removed
                hash_handler.put_hash('')
                data = self.get_topo_function(**self.get_topo_function_args)
                active_server.rest_call('PUT', TOPOLOGY_PATH, data,
                                        timeout=None)
            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                return ret
            else:
                LOG.error(_('ServerProxy: %(action)s failure for servers: '
                            '%(server)r Response: %(response)s'),
                          {'action': action,
                           'server': (active_server.server,
                                      active_server.port),
                           'response': ret[3]})
                LOG.error(_("ServerProxy: Error details: status=%(status)d, "
                            "reason=%(reason)r, ret=%(ret)s, data=%(data)r"),
                          {'status': ret[0], 'reason': ret[1], 'ret': ret[2],
                           'data': ret[3]})
                active_server.failed = True
        # All servers failed, reset server list and try again next time
        LOG.error(_('ServerProxy: %(action)s failure for all servers: '
                    '%(server)r'),
                  {'action': action,
                   'server': tuple((s.server,
                                    s.port) for s in self.servers)})
        return first_response
    def rest_action(self, action, resource, data='', errstr='%s',
                    ignore_codes=None, headers=None, timeout=False):
        """
        Wrapper for rest_call that verifies success and raises a
        RemoteRestError on failure with a provided error string
        By default, 404 errors on DELETE calls are ignored because
        they already do not exist on the backend.
        """
        ignore_codes = ignore_codes or []
        headers = headers or {}
        # Only default 404-on-DELETE when the caller gave no explicit list.
        if not ignore_codes and action == 'DELETE':
            ignore_codes = [404]
        resp = self.rest_call(action, resource, data, headers, ignore_codes,
                              timeout)
        if self.server_failure(resp, ignore_codes):
            LOG.error(errstr, resp[2])
            raise RemoteRestError(reason=resp[2], status=resp[0])
        if resp[0] in ignore_codes:
            # NOTE(review): 'code' is filled from resp[2] (response body),
            # not resp[0] (the status that matched) — confirm intentional.
            LOG.warning(_("NeutronRestProxyV2: Received and ignored error "
                          "code %(code)s on %(action)s action to resource "
                          "%(resource)s"),
                        {'code': resp[2], 'action': action,
                         'resource': resource})
        return resp
    def rest_create_router(self, tenant_id, router):
        """POST a new router for *tenant_id* to the backend controller."""
        resource = ROUTER_RESOURCE_PATH % tenant_id
        data = {"router": router}
        errstr = _("Unable to create remote router: %s")
        self.rest_action('POST', resource, data, errstr)
    def rest_update_router(self, tenant_id, router, router_id):
        """PUT updated router attributes to the backend controller."""
        resource = ROUTERS_PATH % (tenant_id, router_id)
        data = {"router": router}
        errstr = _("Unable to update remote router: %s")
        self.rest_action('PUT', resource, data, errstr)
    def rest_delete_router(self, tenant_id, router_id):
        """DELETE a router on the backend controller (404s are ignored)."""
        resource = ROUTERS_PATH % (tenant_id, router_id)
        errstr = _("Unable to delete remote router: %s")
        self.rest_action('DELETE', resource, errstr=errstr)
    def rest_add_router_interface(self, tenant_id, router_id, intf_details):
        """POST a new interface onto a router on the backend controller."""
        resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id)
        data = {"interface": intf_details}
        errstr = _("Unable to add router interface: %s")
        self.rest_action('POST', resource, data, errstr)
    def rest_remove_router_interface(self, tenant_id, router_id, interface_id):
        """DELETE a router interface on the backend controller."""
        resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id)
        errstr = _("Unable to delete remote intf: %s")
        self.rest_action('DELETE', resource, errstr=errstr)
    def rest_create_network(self, tenant_id, network):
        """POST a new network for *tenant_id* to the backend controller."""
        resource = NET_RESOURCE_PATH % tenant_id
        data = {"network": network}
        errstr = _("Unable to create remote network: %s")
        self.rest_action('POST', resource, data, errstr)
    def rest_update_network(self, tenant_id, net_id, network):
        """PUT updated network attributes to the backend controller."""
        resource = NETWORKS_PATH % (tenant_id, net_id)
        data = {"network": network}
        errstr = _("Unable to update remote network: %s")
        self.rest_action('PUT', resource, data, errstr)
def rest_delete_network(self, tenant_id, net_id):
resource = NETWORKS_PATH % (tenant_id, net_id)
errstr = _("Unable to update remote network: %s")
self.rest_action('DELETE', resource, errstr=errstr)
    def rest_create_port(self, tenant_id, net_id, port):
        """PUT a port (as an attachment) to the backend controller.

        Ports without a MAC address or device_id are skipped entirely —
        the controller only cares about ports attached to devices.
        """
        resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"])
        data = {"port": port}
        device_id = port.get("device_id")
        if not port["mac_address"] or not device_id:
            # controller only cares about ports attached to devices
            LOG.warning(_("No device MAC attached to port %s. "
                          "Skipping notification to controller."), port["id"])
            return
        data["attachment"] = {"id": device_id,
                              "mac": port["mac_address"]}
        errstr = _("Unable to create remote port: %s")
        self.rest_action('PUT', resource, data, errstr)
    def rest_delete_port(self, tenant_id, network_id, port_id):
        """DELETE a port attachment on the backend controller."""
        resource = ATTACHMENT_PATH % (tenant_id, network_id, port_id)
        errstr = _("Unable to delete remote port: %s")
        self.rest_action('DELETE', resource, errstr=errstr)
    def rest_update_port(self, tenant_id, net_id, port):
        """Update a port by re-issuing the create call.

        The controller has no dedicated update operation for the port
        endpoint; the create's PUT replaces the existing attachment.
        """
        # Controller has no update operation for the port endpoint
        # the create PUT method will replace
        self.rest_create_port(tenant_id, net_id, port)
def rest_create_floatingip(self, tenant_id, floatingip):
resource = FLOATINGIPS_PATH % (tenant_id, floatingip['id'])
errstr = _("Unable to create floating IP: %s")
self.rest_action('PUT', resource, errstr=errstr)
def rest_update_floatingip(self, tenant_id, floatingip, oldid):
resource = FLOATINGIPS_PATH % (tenant_id, oldid)
errstr = _("Unable to update floating IP: %s")
self.rest_action('PUT', resource, errstr=errstr)
    def rest_delete_floatingip(self, tenant_id, oldid):
        """DELETE a floating IP on the backend controller (404s ignored)."""
        resource = FLOATINGIPS_PATH % (tenant_id, oldid)
        errstr = _("Unable to delete floating IP: %s")
        self.rest_action('DELETE', resource, errstr=errstr)
    def rest_get_switch(self, switch_id):
        """GET a switch description from the backend controller.

        :returns: the full rest_action response tuple.
        """
        resource = SWITCHES_PATH % switch_id
        errstr = _("Unable to retrieve switch: %s")
        return self.rest_action('GET', resource, errstr=errstr)
def _consistency_watchdog(self, polling_interval=60):
if 'consistency' not in self.get_capabilities():
LOG.warning(_("Backend server(s) do not support automated "
"consitency checks."))
return
if not polling_interval:
LOG.warning(_("Consistency watchdog disabled by polling interval "
"setting of %s."), polling_interval)
return
while True:
# If consistency is supported, all we have to do is make any
# rest call and the consistency header will be added. If it
# doesn't match, the backend will return a synchronization error
# that will be handled by the rest_action.
eventlet.sleep(polling_interval)
try:
self.rest_action('GET', HEALTH_PATH)
except Exception:
LOG.exception(_("Encountered an error checking controller "
"health."))
class HTTPSConnectionWithValidation(httplib.HTTPSConnection):
    """HTTPS connection that can validate the peer certificate.

    ``combined_cert`` is expected to be the path of a PEM bundle holding
    the trusted CAs plus the host certificate (see ServerPool).
    """

    # If combined_cert is None, the connection will continue without
    # any certificate validation.
    combined_cert = None

    def connect(self):
        """Open the TCP socket, tunnel through a proxy if needed, then
        wrap in SSL.

        Validation (CERT_REQUIRED against ``combined_cert``) happens only
        when a bundle is configured; otherwise the handshake uses
        CERT_NONE, i.e. no validation at all.
        """
        try:
            sock = socket.create_connection((self.host, self.port),
                                            self.timeout, self.source_address)
        except AttributeError:
            # python 2.6 doesn't have the source_address attribute
            sock = socket.create_connection((self.host, self.port),
                                            self.timeout)
        if self._tunnel_host:
            # Proxy in use: issue the CONNECT before the SSL handshake.
            self.sock = sock
            self._tunnel()
        if self.combined_cert:
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=ssl.CERT_REQUIRED,
                                        ca_certs=self.combined_cert)
        else:
            # NOTE(review): this branch performs no certificate validation.
            self.sock = ssl.wrap_socket(sock, self.key_file,
                                        self.cert_file,
                                        cert_reqs=ssl.CERT_NONE)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test run for a single file and a display of how many events are collected."""
import argparse
import collections
import cProfile
import logging
import os
import pstats
import sys
import time
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.proto import transmission_pb2
from dfvfs.resolver import resolver as path_spec_resolver
from dfvfs.serializer import protobuf_serializer
from google.protobuf import text_format
try:
# Support version 1.X of IPython.
# pylint: disable=no-name-in-module
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
# Support version older than 1.X of IPython.
# pylint: disable=no-name-in-module
from IPython.frontend.terminal.embed import InteractiveShellEmbed
import pyevt
import pyevtx
import pylnk
import pymsiecf
import pyregf
import plaso
from plaso.engine import engine
from plaso.frontend import psort
from plaso.frontend import utils as frontend_utils
from plaso.lib import queue
# TODO: Remove this after the dfVFS integration.
# TODO: Make sure we don't need to imlement the method _ConsumeItem, or
# to have that not as an abstract method.
# pylint: disable=abstract-method
class PprofEventObjectQueueConsumer(queue.EventObjectQueueConsumer):
  """Event object queue consumer that tallies parser and plugin usage."""

  def __init__(self, queue_object):
    """Initializes the queue consumer.

    Args:
      queue_object: the queue object (instance of Queue).
    """
    super(PprofEventObjectQueueConsumer, self).__init__(queue_object)
    self.counter = collections.Counter()
    self.parsers = []
    self.plugins = []

  def _ConsumeEventObject(self, event_object, **unused_kwargs):
    """Consumes an event object callback for ConsumeEventObject."""
    parser_name = getattr(event_object, 'parser', u'N/A')
    plugin_name = getattr(event_object, 'plugin', u'N/A')

    # Track first-seen order of distinct parsers and plugins.
    if parser_name not in self.parsers:
      self.parsers.append(parser_name)
    if plugin_name not in self.plugins:
      self.plugins.append(plugin_name)

    self.counter[parser_name] += 1
    if plugin_name != u'N/A':
      self.counter[u'[Plugin] {}'.format(plugin_name)] += 1
    self.counter['Total'] += 1
def PrintHeader(options):
  """Print header information, including library versions.

  Args:
    options: the command line arguments (instance of argparse.Namespace).
  """
  print frontend_utils.FormatHeader('File Parsed')
  print u'{:>20s}'.format(options.file_to_parse)
  print frontend_utils.FormatHeader('Versions')
  print frontend_utils.FormatOutputString('plaso engine', plaso.GetVersion())
  print frontend_utils.FormatOutputString('pyevt', pyevt.get_version())
  print frontend_utils.FormatOutputString('pyevtx', pyevtx.get_version())
  print frontend_utils.FormatOutputString('pylnk', pylnk.get_version())
  print frontend_utils.FormatOutputString('pymsiecf', pymsiecf.get_version())
  print frontend_utils.FormatOutputString('pyregf', pyregf.get_version())
  # Optional sections, only shown when the matching option was supplied.
  if options.filter:
    print frontend_utils.FormatHeader('Filter Used')
    print frontend_utils.FormatOutputString('Filter String', options.filter)
  if options.parsers:
    print frontend_utils.FormatHeader('Parser Filter Used')
    print frontend_utils.FormatOutputString('Parser String', options.parsers)
def ProcessStorage(options):
  """Process a storage file and produce profile results.

  Args:
    options: the command line arguments (instance of argparse.Namespace).

  Returns:
    The profiling statistics or None on error.
  """
  # Build the psort argument vector: storage parameters, then the storage
  # file path, then (optionally) the filter expression.
  storage_parameters = options.storage.split()
  storage_parameters.append(options.file_to_parse)
  if options.filter:
    storage_parameters.append(options.filter)

  # Either profile the run (verbose) or just time it.
  if options.verbose:
    # TODO: why not move this functionality into psort?
    profiler = cProfile.Profile()
    profiler.enable()
  else:
    time_start = time.time()

  # Call psort and process output.
  return_value = psort.Main(storage_parameters)
  if options.verbose:
    profiler.disable()
  else:
    time_end = time.time()

  if return_value:
    print u'Parsed storage file.'
  else:
    print u'It appears the storage file may not have processed correctly.'

  if options.verbose:
    return GetStats(profiler)
  else:
    print frontend_utils.FormatHeader('Time Used')
    print u'{:>20f}s'.format(time_end - time_start)
def ProcessFile(options):
  """Process a file and produce profile results.

  Builds a path spec (from a serialized protobuf when --proto was given,
  otherwise from the file path), runs a single-threaded extraction worker
  over it, consumes the resulting event objects and prints parser/plugin
  statistics.

  Args:
    options: the command line arguments (instance of argparse.Namespace).

  Returns:
    The profiling statistics (pstats.Stats) when --verbose, else None.
  """
  if options.proto_file and os.path.isfile(options.proto_file):
    with open(options.proto_file) as fh:
      proto_string = fh.read()
      proto = transmission_pb2.PathSpec()
      try:
        text_format.Merge(proto_string, proto)
      except text_format.ParseError as exception:
        logging.error(u'Unable to parse file, error: {}'.format(
            exception))
        sys.exit(1)
      serializer = protobuf_serializer.ProtobufPathSpecSerializer
      path_spec = serializer.ReadSerializedObject(proto)
  else:
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=options.file_to_parse)
  file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
  if file_entry is None:
    logging.error(u'Unable to open file: {0:s}'.format(options.file_to_parse))
    sys.exit(1)
  # Set few options the engine expects to be there.
  # TODO: Can we rather set this directly in argparse?
  options.single_process = True
  options.debug = False
  options.text_prepend = u''
  # Set up the engine.
  collection_queue = queue.SingleThreadedQueue()
  storage_queue = queue.SingleThreadedQueue()
  parse_error_queue = queue.SingleThreadedQueue()
  engine_object = engine.Engine(
      collection_queue, storage_queue, parse_error_queue)
  # Create a worker.
  worker_object = engine_object.CreateExtractionWorker('0')
  # TODO: add support for parser_filter_string.
  # NOTE(review): "Initalize" typo matches the engine's actual method name.
  worker_object.InitalizeParserObjects()
  # Either profile the parse (verbose) or just time it.
  if options.verbose:
    profiler = cProfile.Profile()
    profiler.enable()
  else:
    time_start = time.time()
  worker_object.ParseFileEntry(file_entry)
  if options.verbose:
    profiler.disable()
  else:
    time_end = time.time()
  engine_object.SignalEndOfInputStorageQueue()
  # Drain the storage queue, counting events per parser/plugin.
  event_object_consumer = PprofEventObjectQueueConsumer(storage_queue)
  event_object_consumer.ConsumeEventObjects()
  if not options.verbose:
    print frontend_utils.FormatHeader('Time Used')
    print u'{:>20f}s'.format(time_end - time_start)
  print frontend_utils.FormatHeader('Parsers Loaded')
  # Accessing protected member.
  # pylint: disable=protected-access
  plugins = []
  for parser_object in sorted(worker_object._parser_objects):
    print frontend_utils.FormatOutputString('', parser_object.NAME)
    parser_plugins = getattr(parser_object, '_plugins', [])
    plugins.extend(parser_plugins)
  print frontend_utils.FormatHeader('Plugins Loaded')
  for plugin in sorted(plugins):
    # Plugins may be registered either as name strings or as objects.
    if isinstance(plugin, basestring):
      print frontend_utils.FormatOutputString('', plugin)
    else:
      plugin_string = getattr(plugin, 'NAME', u'N/A')
      print frontend_utils.FormatOutputString('', plugin_string)
  print frontend_utils.FormatHeader('Parsers Used')
  for parser in sorted(event_object_consumer.parsers):
    print frontend_utils.FormatOutputString('', parser)
  print frontend_utils.FormatHeader('Plugins Used')
  for plugin in sorted(event_object_consumer.plugins):
    print frontend_utils.FormatOutputString('', plugin)
  print frontend_utils.FormatHeader('Counter')
  for key, value in event_object_consumer.counter.most_common():
    print frontend_utils.FormatOutputString(key, value)
  if options.verbose:
    return GetStats(profiler)
def GetStats(profiler):
  """Print verbose information from profiler and return a stats object.

  Args:
    profiler: a cProfile.Profile instance that has finished collecting.

  Returns:
    A pstats.Stats object bound to sys.stdout.
  """
  stats = pstats.Stats(profiler, stream=sys.stdout)
  print frontend_utils.FormatHeader('Profiler')
  print '\n{:-^20}'.format(' Top 10 Time Spent ')
  stats.sort_stats('cumulative')
  stats.print_stats(10)
  print '\n{:-^20}'.format(' Sorted By Function Calls ')
  stats.sort_stats('calls')
  stats.print_stats()
  return stats
def Main():
  """Start the tool.

  Parses the command line, validates the input file, runs either the
  storage-file profiling path or the single-file parsing path, and
  optionally drops into an IPython shell afterwards.

  Returns:
    True on success, False when required arguments are missing/invalid.
  """
  usage = (
      u'Run this tool against a single file to see how many events are '
      u'extracted from it and which parsers recognize it.')
  arg_parser = argparse.ArgumentParser(description=usage)
  format_str = '[%(levelname)s] %(message)s'
  logging.basicConfig(level=logging.INFO, format=format_str)
  arg_parser.add_argument(
      '-v', '--verbose', dest='verbose', action='store_true', default=False,
      help=(
          'Be extra verbose in the information printed out (include full '
          'stats).'))
  arg_parser.add_argument(
      '-c', '--console', dest='console', action='store_true',
      default=False, help='After processing drop to an interactive shell.')
  arg_parser.add_argument(
      '-p', '--parsers', dest='parsers', action='store', default='', type=str,
      help='A list of parsers to include (see log2timeline documentation).')
  arg_parser.add_argument(
      '--proto', dest='proto_file', action='store', default='', type=unicode,
      metavar='PROTO_FILE', help=(
          'A file containing an ASCII PathSpec protobuf describing how to '
          'open up the file for parsing.'))
  arg_parser.add_argument(
      '-s', '--storage', dest='storage', action='store', type=unicode,
      metavar='PSORT_PARAMETER', default='', help=(
          'Run the profiler against a storage file, with the parameters '
          'provided with this option, eg: "-q -w /dev/null". The storage '
          'file has to be passed in as the FILE_TO_PARSE argument to the '
          'tool and filters are also optional. This is equivilant to calling '
          'psort.py STORAGE_PARAMETER FILE_TO_PARSE [FILTER]. Where the '
          'storage parameters are the ones defined with this parameter.'))
  # TODO: Add the option of dropping into a python shell that contains the
  # stats attribute and others, just print out basic information and do the
  # profiling, then drop into a ipython shell that allows you to work with
  # the stats object.
  arg_parser.add_argument(
      'file_to_parse', nargs='?', action='store', metavar='FILE_TO_PARSE',
      default=None, help='A path to the file that is to be parsed.')
  arg_parser.add_argument(
      'filter', action='store', metavar='FILTER', nargs='?', default=None,
      help=('A filter that can be used to filter the dataset before it '
            'is written into storage. More information about the filters'
            ' and it\'s usage can be found here: http://plaso.kiddaland.'
            'net/usage/filters'))
  options = arg_parser.parse_args()
  # Either a file path or a proto file must be supplied.
  if not (options.file_to_parse or options.proto_file):
    arg_parser.print_help()
    print ''
    arg_parser.print_usage()
    print ''
    logging.error('Not able to run without a file to process.')
    return False
  if options.file_to_parse and not os.path.isfile(options.file_to_parse):
    logging.error(u'File [{0:s}] needs to exist.'.format(options.file_to_parse))
    return False
  PrintHeader(options)
  # Stats attribute used for console sessions.
  # pylint: disable=unused-variable
  if options.storage:
    stats = ProcessStorage(options)
  else:
    stats = ProcessFile(options)
  if options.console:
    # Drop into an interactive IPython session with `stats` in scope.
    ipshell = InteractiveShellEmbed()
    ipshell.confirm_exit = False
    ipshell()
  return True
if __name__ == '__main__':
  # Exit 0 on success, 1 on failure — same codes as the original branches.
  sys.exit(0 if Main() else 1)
|
|
# file: gini_nw.py
from ImImagePlugin import number
import os
import xml.dom.minidom
from gini_components import *
class GINI_NW:
switches = []
vm = []
vrm = []
vmb = []
vr = []
cr=[]
vwr = []
def __init__(self, docDOM):
"Initialize the GINI_NW class"
self.getSwitches(docDOM.getElementsByTagName("vs"))
self.getVMs(docDOM.getElementsByTagName('vm'))
self.getVRMs(docDOM.getElementsByTagName('vrm'))
self.getVMBs(docDOM.getElementsByTagName("vmb"))
self.getVRs(docDOM.getElementsByTagName("vr"))
self.getVWRs(docDOM.getElementsByTagName("vwr"))
    def getSwitches(self, elements):
        """Parse <vs> elements into Switch components (self.switches).

        Args:
            elements: NodeList of <vs> DOM elements.

        Returns:
            True always.
        """
        for switch in elements:
            newSwitch = Switch(switch.getAttribute("name"))
            for para in switch.childNodes:
                if (para.nodeType == para.ELEMENT_NODE):
                    # Each recognized child tag fills one Switch field;
                    # <target> may repeat and accumulates into a list.
                    if (para.tagName.lower() == "priority"):
                        newSwitch.priority = self.getTextPart(para)
                    if (para.tagName.lower() == "mac"):
                        newSwitch.mac = self.getTextPart(para)
                    if (para.tagName.lower() == "target"):
                        newSwitch.targets.append(self.getTextPart(para))
                    if (para.tagName.lower() == "port"):
                        newSwitch.port = self.getTextPart(para)
                    if (para.tagName.lower() == "remote"):
                        newSwitch.remote = self.getTextPart(para)
                    if (para.tagName.lower() == "hub"):
                        newSwitch.hub = True
            self.switches.append(newSwitch)
        return True
    def getVMs(self, elements):
        """Parse <vm> elements into VM components (self.vm).

        Filesystem paths are resolved under $GINI_SHARE/filesystem/.

        Returns:
            True always.
        """
        for vm in elements:
            newVM = VM(vm.getAttribute("name"))
            for para in vm.childNodes:
                if (para.nodeType == para.ELEMENT_NODE):
                    if (para.tagName.lower() == "filesystem"):
                        newVM.fileSystem = FileSystem()
                        newVM.fileSystem.type = para.getAttribute("type")
                        newVM.fileSystem.name = os.environ["GINI_SHARE"] + "/filesystem/" + self.getTextPart(para)
                    if (para.tagName.lower() == "mem"):
                        newVM.mem = self.getTextPart(para)
                    if (para.tagName.lower() == "kernel"):
                        newVM.kernel = self.getTextPart(para)
                    if (para.tagName.lower() == "boot"):
                        newVM.boot = self.getBoot(para)
                    if (para.tagName.lower() == "if"):
                        # Interfaces are numbered eth0..N in document order.
                        newIF = self.getVMIF(para, len(newVM.interfaces))
                        newVM.addInterface(newIF)
            self.vm.append(newVM)
        return True
    def getVRMs(self, elements):
        """Parse <vrm> elements into VRM (remote machine) components.

        Like getVMs, but filesystem paths resolve under $GINI_HOME/.

        Returns:
            True always.
        """
        for vrm in elements:
            newVRM = VRM(vrm.getAttribute("name"))
            for para in vrm.childNodes:
                if (para.nodeType == para.ELEMENT_NODE):
                    if (para.tagName.lower() == "filesystem"):
                        newVRM.fileSystem = FileSystem()
                        newVRM.fileSystem.type = para.getAttribute("type")
                        newVRM.fileSystem.name = os.environ["GINI_HOME"] + "/" + self.getTextPart(para)
                    if (para.tagName.lower() == "mem"):
                        newVRM.mem = self.getTextPart(para)
                    if (para.tagName.lower() == "kernel"):
                        newVRM.kernel = self.getTextPart(para)
                    if (para.tagName.lower() == "boot"):
                        newVRM.boot = self.getBoot(para)
                    if (para.tagName.lower() == "if"):
                        newIF = self.getVMIF(para, len(newVRM.interfaces))
                        newVRM.addInterface(newIF)
            self.vrm.append(newVRM)
        return True
def getCRs(self, elemments):
a=5
    def getVMBs(self, elements):
        """Parse <vmb> elements into VMB (wireless VM) components.

        Same structure as getVMs; filesystem paths resolve under
        $GINI_SHARE/filesystem/.

        Returns:
            True always.
        """
        for vmb in elements:
            newVMB = VMB(vmb.getAttribute("name"))
            for para in vmb.childNodes:
                if (para.nodeType == para.ELEMENT_NODE):
                    if (para.tagName.lower() == "filesystem"):
                        newVMB.fileSystem = FileSystem()
                        newVMB.fileSystem.type = para.getAttribute("type")
                        newVMB.fileSystem.name = os.environ["GINI_SHARE"] + "/filesystem/" + self.getTextPart(para)
                    if (para.tagName.lower() == "mem"):
                        newVMB.mem = self.getTextPart(para)
                    if (para.tagName.lower() == "kernel"):
                        newVMB.kernel = self.getTextPart(para)
                    if (para.tagName.lower() == "boot"):
                        newVMB.boot = self.getBoot(para)
                    if (para.tagName.lower() == "if"):
                        newIF = self.getVMIF(para, len(newVMB.interfaces))
                        newVMB.addInterface(newIF)
            self.vmb.append(newVMB)
        return True
    def getVRs(self, elements):
        """Parse <vr> elements into VR (virtual router) components.

        Network interfaces ("netif") are numbered from eth1 here
        (len(netIF)+1); "loctun" and "clotun" tunnels share a single
        tun0..N counter.

        Returns:
            True always.
        """
        for router in elements:
            newVR = VR(router.getAttribute("name"))
            numberOfTun=0
            for para in router.childNodes:
                if (para.nodeType == para.ELEMENT_NODE):
                    if (para.tagName.lower() == "cli"):
                        newVR.cli = True
                    if (para.tagName.lower() == "netif"):
                        newIF = self.getVRIF(para, len(newVR.netIF)+1)
                        newVR.addNetIF(newIF)
                    if (para.tagName.lower() == "loctun"):
                        newIF = self.getTUNIF(para, numberOfTun)
                        numberOfTun+=1
                        newVR.addTunIF(newIF)
                    if (para.tagName.lower() == "clotun"):
                        newIF = self.getTUNIF(para, numberOfTun)
                        numberOfTun+=1
                        newVR.addTunIF(newIF)
            self.vr.append(newVR)
        return True
    def getVWRs(self, elements):
        """Parse <vwr> elements into VWR (wireless router) components.

        NOTE(review): wired interfaces here are numbered from eth0
        (len(netIF)), whereas getVRs numbers from eth1 — confirm this
        difference is intentional.

        Returns:
            True always.
        """
        for router in elements:
            newVWR = VWR(router.getAttribute("name"))
            for para in router.childNodes:
                if (para.nodeType == para.ELEMENT_NODE):
                    if (para.tagName.lower() == "cli"):
                        newVWR.cli = True
                    if (para.tagName.lower() == "netif"):
                        newIF = self.getVRIF(para, len(newVWR.netIF))
                        newVWR.addNetIF(newIF)
                    if (para.tagName.lower() == "netif_wireless"):
                        newWIF = self.getVWRIF(para, len(newVWR.netIFWireless))
                        newVWR.addWirelessIF(newWIF)
            self.vwr.append(newVWR)
        return True
def getTextPart(self,elem):
"Extract the text within the element"
for textPart in elem.childNodes:
if (textPart.nodeType == textPart.TEXT_NODE):
remoteName = textPart.nodeValue.strip()
if (remoteName):
return remoteName
return ""
def getBoot(self, elem):
"get boot elememnt in VM specification"
for part in elem.childNodes:
if (part.nodeType == part.ELEMENT_NODE and
part.tagName.lower() == "con0"):
return self.getTextPart(part)
return ""
    def getVMIF(self, elem, count):
        """Parse an <if> element into a VMInterface named eth<count>.

        Recognized children: <target>, <mac>, <ip>, and repeatable
        <route> entries.
        """
        ifName = "eth%d" % count
        myIF = VMInterface(ifName)
        for para in elem.childNodes:
            if (para.nodeType == para.ELEMENT_NODE):
                if (para.tagName.lower() == "target"):
                    myIF.target = self.getTextPart(para)
                if (para.tagName.lower() == "mac"):
                    myIF.mac = self.getTextPart(para)
                if (para.tagName.lower() == "ip"):
                    myIF.ip = self.getTextPart(para)
                if (para.tagName.lower() == "route"):
                    newRoute = self.getVMRoute(para)
                    myIF.addRoute(newRoute)
        return myIF
def getVMRoute(self, elem):
"Extract VM route entries"
newRoute = VMRoute()
newRoute.type = elem.getAttribute("type")
newRoute.netmask = elem.getAttribute("netmask")
newRoute.gw = elem.getAttribute("gw")
newRoute.dest = self.getTextPart(elem)
return newRoute
    def getVRIF(self, elem, index):
        """Parse a <netif> element into a VRInterface named eth<index>.

        Recognized children: <target>, <nic>, <ip>, <network>, <gw>,
        <mtu>, and repeatable <rtentry> routes.
        """
        ifName = "eth%d" % index
        myIF = VRInterface(ifName)
        for para in elem.childNodes:
            if (para.nodeType == para.ELEMENT_NODE):
                if (para.tagName.lower() == "target"):
                    myIF.target = self.getTextPart(para)
                if (para.tagName.lower() == "nic"):
                    myIF.nic = self.getTextPart(para)
                if (para.tagName.lower() == "ip"):
                    myIF.ip = self.getTextPart(para)
                if (para.tagName.lower() == "network"):
                    myIF.network = self.getTextPart(para)
                if (para.tagName.lower() == "gw"):
                    myIF.gw = self.getTextPart(para)
                if (para.tagName.lower() == "mtu"):
                    myIF.mtu = self.getTextPart(para)
                if (para.tagName.lower() == "rtentry"):
                    newRoute = self.getVRRoute(para)
                    myIF.addRoute(newRoute)
        return myIF
    def getTUNIF(self, elem, index):
        """Parse a tunnel element into a VRInterface named tun<index>.

        NOTE(review): identical to getVRIF except for the interface name
        prefix — a candidate for consolidation.
        """
        ifName = "tun%d" % index
        myIF = VRInterface(ifName)
        for para in elem.childNodes:
            if (para.nodeType == para.ELEMENT_NODE):
                if (para.tagName.lower() == "target"):
                    myIF.target = self.getTextPart(para)
                if (para.tagName.lower() == "nic"):
                    myIF.nic = self.getTextPart(para)
                if (para.tagName.lower() == "ip"):
                    myIF.ip = self.getTextPart(para)
                if (para.tagName.lower() == "network"):
                    myIF.network = self.getTextPart(para)
                if (para.tagName.lower() == "gw"):
                    myIF.gw = self.getTextPart(para)
                if (para.tagName.lower() == "mtu"):
                    myIF.mtu = self.getTextPart(para)
                if (para.tagName.lower() == "rtentry"):
                    newRoute = self.getVRRoute(para)
                    myIF.addRoute(newRoute)
        return myIF
    def getVWRIF(self, elem, index):
        """Parse a <netif_wireless> element into a VWRInterface eth<index>.

        Besides the wired fields (<nic>, <ip>, <network>, <rtentry>),
        wireless sub-configurations (<wireless_card>, <energy>,
        <mac_layer>, <antenna>, <mobility>) are delegated to their own
        parser methods.
        """
        ifName = "eth%d" % index
        myWIF = VWRInterface(ifName)
        for para in elem.childNodes:
            if (para.nodeType == para.ELEMENT_NODE):
                if (para.tagName.lower() == "nic"):
                    myWIF.nic = self.getTextPart(para)
                if (para.tagName.lower() == "ip"):
                    myWIF.ip = self.getTextPart(para)
                if (para.tagName.lower() == "network"):
                    myWIF.network = self.getTextPart(para)
                if (para.tagName.lower() == "rtentry"):
                    newRoute = self.getVRRoute(para)
                    myWIF.addRoute(newRoute)
                if (para.tagName.lower() == "wireless_card"):
                    newWcard = self.getWcard(para)
                    myWIF.wireless_card = newWcard
                if (para.tagName.lower() == "energy"):
                    newEnergy = self.getEnergy(para)
                    myWIF.energy = newEnergy
                if (para.tagName.lower() == "mac_layer"):
                    newMlayer = self.getMlayer(para)
                    myWIF.mac_layer = newMlayer
                if (para.tagName.lower() == "antenna"):
                    newAntenna = self.getAntenna(para)
                    myWIF.antenna = newAntenna
                if (para.tagName.lower() == "mobility"):
                    newMobility = self.getMobility(para)
                    myWIF.mobility = newMobility
        return myWIF
def getWcard(self, elem):
    """Parse a <wireless_card> element into a WirelessCard.

    Every recognized child element is a simple text field, so a
    tag -> attribute mapping replaces the original's thirteen
    independent if-tests (which also recomputed tagName.lower()
    for each one).
    """
    # XML tag name -> WirelessCard attribute name.
    fields = {
        "w_type": "wType", "freq": "freq", "bandwidth": "bandwidth",
        "pt": "pt", "pt_c": "ptC", "pr_c": "prC", "p_idle": "pIdle",
        "p_sleep": "pSleep", "p_off": "pOff", "rx": "rx", "cs": "cs",
        "cp": "cp", "module": "module",
    }
    newWcard = WirelessCard()
    for para in elem.childNodes:
        if para.nodeType != para.ELEMENT_NODE:
            continue
        attr = fields.get(para.tagName.lower())
        if attr is not None:
            setattr(newWcard, attr, self.getTextPart(para))
    return newWcard
def getEnergy(self, elem):
    """Parse an <energy> element into an Energy object."""
    newEnergy = Energy()
    for para in elem.childNodes:
        if para.nodeType != para.ELEMENT_NODE:
            continue
        # Computed once instead of once per comparison.
        tag = para.tagName.lower()
        if tag == "power":
            newEnergy.power = self.getTextPart(para)
        elif tag == "psm":
            newEnergy.psm = self.getTextPart(para)
        elif tag == "energy_amount":
            newEnergy.energyAmount = self.getTextPart(para)
    return newEnergy
def getMlayer(self, elem):
    """Parse a <mac_layer> element into a MacLayer object."""
    newMlayer = MacLayer()
    for para in elem.childNodes:
        if para.nodeType != para.ELEMENT_NODE:
            continue
        # Computed once instead of once per comparison.
        tag = para.tagName.lower()
        if tag == "mac_type":
            newMlayer.macType = self.getTextPart(para)
        elif tag == "trans":
            newMlayer.trans = self.getTextPart(para)
    return newMlayer
def getAntenna(self, elem):
    """Parse an <antenna> element into an Antenna object."""
    newAntenna = Antenna()
    for para in elem.childNodes:
        if para.nodeType != para.ELEMENT_NODE:
            continue
        # Computed once instead of once per comparison.
        tag = para.tagName.lower()
        if tag == "a_type":
            newAntenna.aType = self.getTextPart(para)
        elif tag == "ant_h":
            newAntenna.ant_h = self.getTextPart(para)
        elif tag == "ant_g":
            newAntenna.ant_g = self.getTextPart(para)
        elif tag == "ant_l":
            newAntenna.ant_l = self.getTextPart(para)
        elif tag == "jam":
            newAntenna.jam = self.getTextPart(para)
    return newAntenna
def getMobility(self, elem):
    """Parse a <mobility> element into a Mobility object."""
    newMobility = Mobility()
    for para in elem.childNodes:
        if para.nodeType != para.ELEMENT_NODE:
            continue
        # Computed once instead of once per comparison.
        tag = para.tagName.lower()
        if tag == "m_type":
            newMobility.mType = self.getTextPart(para)
        elif tag == "ran_max":
            newMobility.ranMax = self.getTextPart(para)
        elif tag == "ran_min":
            newMobility.ranMin = self.getTextPart(para)
    return newMobility
def getVRRoute(self, elem):
    """Build a VRRoute from an <rtentry> element.

    netmask and nexthop come from XML attributes; the destination is
    the element's text content.
    """
    route = VRRoute()
    route.netmask = elem.getAttribute("netmask")
    route.nexthop = elem.getAttribute("nexthop")
    route.dest = self.getTextPart(elem)
    return route
|
|
#!/usr/bin/env python
"""
Auditory-model features based on subband PCA (sbpca) features
Based on calcAudSemaFtrs matlab code.
2013-09-19 Dan Ellis dpwe@ee.columbia.edu
"""
import math
import numpy as np
import scipy.signal
import scipy.io
import scipy.cluster
import sbpca
# for main
import sys
# For SRI's wavreading code
import scipy.io.wavfile as wav
################### from spca_vqs.m
def vquantize(pcas, vqcodebook, vqmeans, vqstds):
    """
    % vqs = sbpca_vqs(pcas, params)
    % Convert subband principal components to vector quantized values
    % for SBPCA.
    % pcas are (pcadim == 10, chans == 24, timefrm)
    % vqs are (blocks == 4, timefrm)
    % each value is one VQ index (0..1023).
    % 2013-08-03 Dan Ellis dpwe@ee.columbia.edu
    """
    # NOTE(review): the unpack below treats pcas as (chans, pcadim, time),
    # i.e. 24 x 10 x T, which contradicts the docstring's ordering above --
    # confirm which layout callers actually pass.
    (n_rec, n_ftr, k_dim) = np.shape(vqcodebook)  # 4 x 1000 x 60
    (n_chs, n_dim, n_tim) = np.shape(pcas)  # 24 x 10 x T
    # Reshape pcas into vectors to be quantized
    grouping = int(n_chs / n_rec)  # better hope it's 24 / 4 = 6
    # allocate: one codeword index per (codebook block, frame)
    vqs = np.zeros((n_rec, n_tim), int)
    for i in range(n_rec):
        # Channels belonging to this codebook block.
        chs = range(i*grouping, (i+1)*grouping)
        # 60 dim codewords in Matlab file are ordered with the 6 chans as
        # the fastest rotating dimension, so make sure python array does this
        # too before collapsing 10x6 into rows of 60
        vqs[i, ] = ac_vq_quantize(pcas[chs, :, :].transpose(1, 0, 2)
                                  .reshape(grouping*n_dim, n_tim).transpose(),
                                  vqcodebook[i, ], vqmeans[i, ], vqstds[i, ])
    return vqs
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def ac_vq_quantize(data, vqcodes, vqmean, vqstd):
    """
    % I = acVQQuantize(D,CB)
    % Return I as the quantization of each data row D according to
    % the codebook CB.mean, CB.std, CB.codewords.
    % 2012-07-31 Dan Ellis dpwe@ee.columbia.edu

    Rows of data are mean/std normalized with vqmean/vqstd, then each
    is assigned the index of the nearest codeword in vqcodes.

    :param data: (n_tim, n_d) array of observations
    :param vqcodes: (n_codes, n_d) codebook
    :returns: (n_tim,) int array of codeword indices
    """
    (n_tim, n_d) = np.shape(data)
    indx = np.zeros(n_tim, int)
    if n_tim == 0 or n_d == 0:
        # Nothing to quantize (also avoids 0/0 in the ceil below).
        return indx
    # Process in bounded chunks to limit the distance matrix vq builds.
    maxdrows = min(10000, n_tim)
    # Float division: under Python 2 integer division the ceil was a
    # no-op, silently leaving a tail of rows unquantized (index 0).
    nblocks = int(math.ceil(n_tim / float(maxdrows)))
    for blk in range(nblocks):
        lo = blk * maxdrows
        hi = min(n_tim, lo + maxdrows)
        # Extract rows and apply mean/var normalization.
        d_block = (data[lo:hi] - vqmean) / vqstd
        # scipy.cluster.vq.vq returns (codes, distortions); the original
        # assigned the whole tuple, which cannot broadcast into indx --
        # keep only the code indices.
        codes, _dists = scipy.cluster.vq.vq(d_block, vqcodes)
        indx[lo:hi] = codes
    return indx
#####################################
def hist(vqs, winframes, hopframes, codesperblock):
    """
    % hists = sbpca_hist(vqs, params)
    % Collapse the vq codewords into per-segment histograms.

    Each output row covers winframes frames starting every hopframes
    frames; counts are averaged over the frames in the window and over
    the code blocks, so each row sums to 1.

    :param vqs: (codeblocks, nframes) int array of codeword indices
    :returns: (nwindows, codeblocks*codesperblock) float array
    """
    n_cb, n_frames = np.shape(vqs)
    n_wins = int(np.round(n_frames / float(hopframes)))
    hists = np.zeros((n_wins, n_cb * codesperblock), float)
    for win in range(n_wins):
        lo = win * hopframes
        hi = min(lo + winframes, n_frames)
        for cbk in range(n_cb):
            base = cbk * codesperblock
            for frm in range(lo, hi):
                hists[win, base + vqs[cbk, frm]] += 1.0
        # Normalize by frames in this window and by code blocks.
        hists[win, :] /= (hi - lo) * n_cb
    return hists
#####################################
# Main class
class AudFtr(object):
    """
    Compute sbpca-based auditory features:
    VQ-codeword histograms pooled over hist_win-second windows.
    """

    def __init__(self, conf):
        """Set up the sbpca front end and load the VQ codebooks."""
        # Subband-PCA analysis subsystem.
        self.sbpca = sbpca.SbPca(conf)
        # The .mat file holds the codewords plus per-block means/stds.
        cbdata = scipy.io.loadmat(conf['vq_file'])
        # Reorder to blocks x codewords x dims (4 x 1000 x 60).
        self.vqcodebook = cbdata["codebook"].transpose((0, 2, 1))
        self.vqmeans = cbdata["recMean"]
        self.vqstds = cbdata["recStd"]
        # Histogram pooling window / hop, in seconds.
        self.hist_win = conf["hist_win"]
        self.hist_hop = conf["hist_hop"]

    def __call__(self, data, srate):
        """
        Run the sbpcahist feature extractor on a waveform at sampling
        rate srate, using the configuration given at construction.
        Returns the histogram matrix produced by hist().
        """
        # Subband PCA features for every analysis frame.
        pcas = self.sbpca.calc_sbpcas(data, srate)
        # Per-frame vector quantization against the loaded codebooks.
        vqs = vquantize(pcas, self.vqcodebook, self.vqmeans, self.vqstds)
        # Pool codewords into histograms; convert seconds to frames.
        winfrm = int(np.round(self.hist_win / self.sbpca.ac_hop))
        hopfrm = int(np.round(self.hist_hop / self.sbpca.ac_hop))
        return hist(vqs, winfrm, hopfrm, np.shape(self.vqcodebook)[1])
############## Provide a command-line wrapper
def main(argv):
    """Command-line driver: compute audftr features for one wav file.

    Usage: main(['prog', 'inputsound.wav', 'outputaudftr.txt'])
    Writes one text row per histogram window:
    <zero> <window-index> <feature values...>.
    """
    if len(argv) != 3:
        # The original passed a tuple of strings to NameError, which
        # printed as a tuple; keep the exception type but give it one
        # readable message.
        raise NameError("Usage: %s inputsound.wav outputaudftr.txt"
                        % argv[0])
    inwavfile = argv[1]
    outptfile = argv[2]
    # Setup config
    config = {}
    # sbpca params
    # diff file for py
    config['pca_file'] = 'aux/mapping-pca_sr8k_bpo6_sb24_k10.mat'
    config['nchs'] = 24
    config['SBF_sr'] = 8000
    config['SBF_fmin'] = 100
    config['SBF_bpo'] = 6
    config['SBF_q'] = 8  # not actually used for SlanPat ERB filters
    config['SBF_order'] = 2  # not actually used for SlanPat ERB filters
    config['twin'] = 0.025  # autoco window len
    config['thop'] = 0.010  # autoco hop
    # audftr params
    config['vq_file'] = '../CB-sbpca-4x60x1000.mat'
    config['hist_win'] = 2.0  # histogram pooling window len
    config['hist_hop'] = 2.0  # histogram hop
    # Configure
    ftr_extractor = AudFtr(config)
    # Read in wav file
    srate, wavd = wav.read(inwavfile)
    # normalize short ints to floats of -1 / 1
    data = np.asfarray(wavd) / 32768.0
    # Apply
    features = ftr_extractor(data, srate)
    # Write the data out
    nfr = np.size(features, axis=0)
    np.savetxt(outptfile,
               np.c_[np.zeros(nfr), range(nfr), features],
               fmt='%.4f', delimiter=' ', newline='\n')
# Script entry point: pass the raw argv straight through to main().
if __name__ == "__main__":
    main(sys.argv)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import collections
import datetime
import functools
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI
from cinder.image import glance
from cinder import keymgr
from cinder.openstack.common import log as logging
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
# Config options controlling volume placement and AZ caching,
# registered on the global CONF below.
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
                              default=True,
                              help='Create volume from snapshot at the host '
                                   'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
                                 default=True,
                                 help='Ensure that the new volumes are the '
                                      'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
                               default=3600,
                               help='Cache volume availability zones in '
                                    'memory for the provided duration in '
                                    'seconds')

CONF = cfg.CONF
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
# Options defined in other modules that this one reads.
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')

LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution

    This decorator requires the first 3 args of the wrapped function
    to be (self, context, volume); the policy action checked is the
    wrapped function's own name.
    """
    @functools.wraps(func)
    def _enforce_then_call(self, context, target_obj, *args, **kwargs):
        # Deny before the real method runs if policy rejects the caller.
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)
    return _enforce_then_call
def check_policy(context, action, target_obj=None):
    """Enforce the 'volume:<action>' policy rule for this request.

    The policy target is the caller's project/user, overlaid with the
    target object's attributes when one is supplied.
    """
    target = {'project_id': context.project_id,
              'user_id': context.user_id}
    if target_obj:
        target.update(target_obj)
    cinder.policy.enforce(context, 'volume:%s' % action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
    # Image service defaults to the configured glance backend.
    self.image_service = (image_service or
                          glance.get_default_image_service())
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
    self.volume_rpcapi = volume_rpcapi.VolumeAPI()
    # In-memory AZ cache; see list_availability_zones().
    self.availability_zones = []
    self.availability_zones_last_fetched = None
    self.key_manager = keymgr.API()
    super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
    """Describe the known availability zones

    :param enable_cache: serve from / refresh the in-memory cache
        (max age CONF.az_cache_duration) instead of hitting the DB
        on every call
    :retval list of dicts, each with a 'name' and 'available' key
    """
    refresh_cache = False
    if enable_cache:
        if self.availability_zones_last_fetched is None:
            refresh_cache = True
        else:
            cache_age = timeutils.delta_seconds(
                self.availability_zones_last_fetched,
                timeutils.utcnow())
            if cache_age >= CONF.az_cache_duration:
                refresh_cache = True
    if refresh_cache or not enable_cache:
        topic = CONF.volume_topic
        ctxt = context.get_admin_context()
        services = self.db.service_get_all_by_topic(ctxt, topic)
        az_data = [(s['availability_zone'], s['disabled'])
                   for s in services]
        disabled_map = {}
        for (az_name, disabled) in az_data:
            # AND-fold 'disabled' per AZ: the zone counts as available
            # if ANY of its services is enabled.
            tracked_disabled = disabled_map.get(az_name, True)
            disabled_map[az_name] = tracked_disabled and disabled
        azs = [{'name': name, 'available': not disabled}
               for (name, disabled) in disabled_map.items()]
        if refresh_cache:
            now = timeutils.utcnow()
            self.availability_zones = azs
            self.availability_zones_last_fetched = now
            LOG.debug("Availability zone cache updated, next update will"
                      " occur around %s.", now + datetime.timedelta(
                          seconds=CONF.az_cache_duration))
    else:
        azs = self.availability_zones
    return tuple(azs)
def create(self, context, size, name, description, snapshot=None,
           image_id=None, volume_type=None, metadata=None,
           availability_zone=None, source_volume=None,
           scheduler_hints=None,
           source_replica=None, consistencygroup=None):
    """Create a new volume via the taskflow create-volume flow.

    Validates size and volume_type compatibility with the various
    possible sources up front, then builds and runs the api
    create_volume flow and returns the volume it stored.

    :raises InvalidInput: bad size or incompatible volume_type
    :raises CinderException: flow construction failed
    """
    # NOTE(jdg): we can have a create without size if we're
    # doing a create from snap or volume.  Currently
    # the taskflow api will handle this and pull in the
    # size from the source.

    # NOTE(jdg): cinderclient sends in a string representation
    # of the size value. BUT there is a possibility that somebody
    # could call the API directly so the is_int_like check
    # handles both cases (string representation of true float or int).
    if size and (not utils.is_int_like(size) or int(size) <= 0):
        msg = _('Invalid volume size provided for create request: %s '
                '(size argument must be an integer (or string '
                'representation of an integer) and greater '
                'than zero).') % size
        raise exception.InvalidInput(reason=msg)

    if consistencygroup:
        # CG volumes must carry a type the group supports.
        if not volume_type:
            msg = _("volume_type must be provided when creating "
                    "a volume in a consistency group.")
            raise exception.InvalidInput(reason=msg)
        cg_voltypeids = consistencygroup.get('volume_type_id')
        if volume_type.get('id') not in cg_voltypeids:
            msg = _("Invalid volume_type provided: %s (requested "
                    "type must be supported by this consistency "
                    "group).") % volume_type
            raise exception.InvalidInput(reason=msg)

    # A clone must keep its source volume's type.
    if source_volume and volume_type:
        if volume_type['id'] != source_volume['volume_type_id']:
            msg = _("Invalid volume_type provided: %s (requested type "
                    "must match source volume, "
                    "or be omitted).") % volume_type
            raise exception.InvalidInput(reason=msg)

    # When cloning replica (for testing), volume type must be omitted
    if source_replica and volume_type:
        msg = _("No volume_type should be provided when creating test "
                "replica.")
        raise exception.InvalidInput(reason=msg)

    # A volume built from a snapshot must keep the snapshot's type.
    if snapshot and volume_type:
        if volume_type['id'] != snapshot['volume_type_id']:
            msg = _("Invalid volume_type provided: %s (requested "
                    "type must match source snapshot, or be "
                    "omitted).") % volume_type
            raise exception.InvalidInput(reason=msg)

    # Determine the valid availability zones that the volume could be
    # created in (a task in the flow will/can use this information to
    # ensure that the availability zone requested is valid).
    raw_zones = self.list_availability_zones(enable_cache=True)
    availability_zones = set([az['name'] for az in raw_zones])
    if CONF.storage_availability_zone:
        availability_zones.add(CONF.storage_availability_zone)

    create_what = {
        'context': context,
        'raw_size': size,
        'name': name,
        'description': description,
        'snapshot': snapshot,
        'image_id': image_id,
        'raw_volume_type': volume_type,
        'metadata': metadata,
        'raw_availability_zone': availability_zone,
        'source_volume': source_volume,
        'scheduler_hints': scheduler_hints,
        'key_manager': self.key_manager,
        'source_replica': source_replica,
        'optional_args': {'is_quota_committed': False},
        'consistencygroup': consistencygroup
    }
    try:
        flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
                                             self.volume_rpcapi,
                                             self.db,
                                             self.image_service,
                                             availability_zones,
                                             create_what)
    except Exception:
        LOG.exception(_LE("Failed to create api volume flow."))
        raise exception.CinderException(
            _("Failed to create api volume flow."))

    # Attaching this listener will capture all of the notifications that
    # taskflow sends out and redirect them to a more useful log for
    # cinders debugging (or error reporting) usage.
    with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
        flow_engine.run()
        return flow_engine.storage.fetch('volume')
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
    """Delete a volume, enforcing state checks and quota bookkeeping.

    Unscheduled volumes (no host) are destroyed directly in the DB
    with a quota refund; otherwise the request is handed to the
    volume manager after status validation.

    :param force: skip the available/error status requirement
    :param unmanage_only: driver should unmanage rather than destroy
    :raises VolumeAttached: volume is still attached
    :raises InvalidVolume: bad status, migrating, in a CG, or has
        dependent snapshots
    """
    # Admins deleting another project's volume must charge that
    # project's quota, not their own.
    if context.is_admin and context.project_id != volume['project_id']:
        project_id = volume['project_id']
    else:
        project_id = context.project_id

    volume_id = volume['id']
    if not volume['host']:
        volume_utils.notify_about_volume_usage(context,
                                               volume, "delete.start")
        # NOTE(vish): scheduling failed, so delete it
        # Note(zhiteng): update volume quota reservation
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume['volume_type_id'])
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            # Best-effort refund: deletion proceeds even if the quota
            # update fails.
            reservations = None
            LOG.exception(_LE("Failed to update quota while "
                              "deleting volume."))
        self.db.volume_destroy(context.elevated(), volume_id)

        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        volume_utils.notify_about_volume_usage(context,
                                               volume, "delete.end")
        return

    if volume['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        LOG.info(_LI('Unable to delete volume: %s, '
                     'volume is attached.'), volume['id'])
        raise exception.VolumeAttached(volume_id=volume_id)

    if not force and volume['status'] not in ["available", "error",
                                              "error_restoring",
                                              "error_extending"]:
        msg = _("Volume status must be available or error, "
                "but current status is: %s.") % volume['status']
        LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
                     'volume must be available or '
                     'error, but is %(vol_status)s.'),
                 {'vol_id': volume['id'],
                  'vol_status': volume['status']})
        raise exception.InvalidVolume(reason=msg)

    if volume['migration_status'] is not None:
        # Volume is migrating, wait until done
        LOG.info(_LI('Unable to delete volume: %s, '
                     'volume is currently migrating.'), volume['id'])
        msg = _("Volume cannot be deleted while migrating")
        raise exception.InvalidVolume(reason=msg)

    if volume['consistencygroup_id'] is not None:
        msg = _("Volume cannot be deleted while in a consistency group.")
        LOG.info(_LI('Unable to delete volume: %s, '
                     'volume is currently part of a '
                     'consistency group.'), volume['id'])
        raise exception.InvalidVolume(reason=msg)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if len(snapshots):
        LOG.info(_LI('Unable to delete volume: %s, '
                     'volume currently has snapshots.'), volume['id'])
        msg = _("Volume still has %d dependent "
                "snapshots.") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    # If the volume is encrypted, delete its encryption key from the key
    # manager. This operation makes volume deletion an irreversible process
    # because the volume cannot be decrypted without its key.
    encryption_key_id = volume.get('encryption_key_id', None)
    if encryption_key_id is not None:
        self.key_manager.delete_key(context, encryption_key_id)

    now = timeutils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
    LOG.info(_LI('Succesfully issued request to '
                 'delete volume: %s.'), volume['id'])
@wrap_check_policy
def update(self, context, volume, fields):
    """Apply the given field updates to the volume's DB record."""
    self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id, viewable_admin_meta=False):
    """Fetch one volume as a plain dict, enforcing the 'get' policy.

    :param viewable_admin_meta: fetch with an elevated context so
        admin metadata is included in the result
    :raises VolumeNotFound: volume missing, or policy denied access
    """
    ctxt = context.elevated() if viewable_admin_meta else context
    record = self.db.volume_get(ctxt, volume_id)
    volume = dict(record.iteritems())
    try:
        check_policy(context, 'get', volume)
    except exception.PolicyNotAuthorized:
        # raise VolumeNotFound instead to make sure Cinder behaves
        # as it used to
        raise exception.VolumeNotFound(volume_id=volume_id)
    return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
def get_all(self, context, marker=None, limit=None, sort_key='created_at',
            sort_dir='desc', filters=None, viewable_admin_meta=False):
    """List volumes, project-scoped unless an admin asks for all_tenants.

    :param marker: pagination marker
    :param limit: max results; must be a non-negative integer
    :param filters: attribute filters; 'all_tenants' is a directive
        consumed here, not a volume attribute
    :raises InvalidInput: bad limit or bad all_tenants value
    """
    check_policy(context, 'get_all')

    if filters is None:
        filters = {}

    allTenants = self._get_all_tenants_value(filters)

    try:
        if limit is not None:
            limit = int(limit)
            if limit < 0:
                msg = _('limit param must be positive')
                raise exception.InvalidInput(reason=msg)
    except ValueError:
        msg = _('limit param must be an integer')
        raise exception.InvalidInput(reason=msg)

    # Non-admin shouldn't see temporary target of a volume migration, add
    # unique filter data to reflect that only volumes with a NULL
    # 'migration_status' or a 'migration_status' that does not start with
    # 'target:' should be returned (processed in db/sqlalchemy/api.py)
    if not context.is_admin:
        filters['no_migration_targets'] = True

    if filters:
        LOG.debug("Searching by: %s.", six.text_type(filters))

    if context.is_admin and allTenants:
        # Need to remove all_tenants to pass the filtering below.
        del filters['all_tenants']
        volumes = self.db.volume_get_all(context, marker, limit, sort_key,
                                         sort_dir, filters=filters)
    else:
        if viewable_admin_meta:
            context = context.elevated()
        volumes = self.db.volume_get_all_by_project(context,
                                                    context.project_id,
                                                    marker, limit,
                                                    sort_key, sort_dir,
                                                    filters=filters)
    return volumes
def get_snapshot(self, context, snapshot_id):
    """Fetch one snapshot as a plain dict (policy-checked)."""
    check_policy(context, 'get_snapshot')
    snapshot = self.db.snapshot_get(context, snapshot_id)
    return dict(snapshot.iteritems())
def get_volume(self, context, volume_id):
    """Fetch one volume as a plain dict (policy-checked)."""
    check_policy(context, 'get_volume')
    volume = self.db.volume_get(context, volume_id)
    return dict(volume.iteritems())
def get_all_snapshots(self, context, search_opts=None):
    """List snapshots, optionally filtered by search_opts.

    Admins passing 'all_tenants' see every project's snapshots;
    everyone else sees only their own project's. Remaining
    search_opts entries are matched as snapshot attributes.
    """
    check_policy(context, 'get_all_snapshots')

    search_opts = search_opts or {}

    if context.is_admin and 'all_tenants' in search_opts:
        # 'all_tenants' is a directive, not a snapshot field; drop it
        # before using search_opts as attribute filters.
        del search_opts['all_tenants']
        snapshots = self.db.snapshot_get_all(context)
    else:
        snapshots = self.db.snapshot_get_all_by_project(
            context, context.project_id)

    if search_opts:
        LOG.debug("Searching by: %s", search_opts)
        # Keep snapshots matching every requested attribute; a missing
        # attribute never matches (sentinel compares unequal).
        not_found = object()
        snapshots = [snap for snap in snapshots
                     if all(snap.get(opt, not_found) == value
                            for opt, value in search_opts.iteritems())]
    return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
    """Claim an available volume by marking it 'attaching'.

    :raises InvalidVolume: volume is not currently 'available'
    """
    # NOTE(jdg): check for Race condition bug 1096983
    # explicitly get updated ref and check
    volume = self.db.volume_get(context, volume['id'])
    if volume['status'] == 'available':
        self.update(context, volume, {"status": "attaching"})
    else:
        msg = _("Volume status must be available to reserve.")
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def unreserve_volume(self, context, volume):
    """Undo reserve_volume: flip 'attaching' back to 'available'."""
    if volume['status'] == "attaching":
        self.update(context, volume, {"status": "available"})
@wrap_check_policy
def begin_detaching(self, context, volume):
    """Transition an attached, in-use volume to 'detaching'.

    :raises InvalidVolume: volume not in-use/attached
    """
    # If we are in the middle of a volume migration, we don't want the user
    # to see that the volume is 'detaching'. Having 'migration_status' set
    # will have the same effect internally.
    if volume['migration_status']:
        return

    if (volume['status'] != 'in-use' or
            volume['attach_status'] != 'attached'):
        msg = (_("Unable to detach volume. Volume status must be 'in-use' "
                 "and attach_status must be 'attached' to detach. "
                 "Currently: status: '%(status)s', "
                 "attach_status: '%(attach_status)s.'") %
               {'status': volume['status'],
                'attach_status': volume['attach_status']})
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)
    self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
    """Roll back a failed detach: 'detaching' -> 'in-use'."""
    if volume['status'] == "detaching":
        self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
           mountpoint, mode):
    """Attach a volume to an instance or host via the volume manager.

    :param mode: 'rw' or 'ro'; must be 'ro' for readonly volumes
    :raises InvalidVolumeAttachMode: rw attach of a readonly volume
    """
    volume_metadata = self.get_volume_admin_metadata(context.elevated(),
                                                     volume)
    if 'readonly' not in volume_metadata:
        # NOTE(zhiyan): set a default value for read-only flag to metadata.
        self.update_volume_admin_metadata(context.elevated(), volume,
                                          {'readonly': 'False'})
        volume_metadata['readonly'] = 'False'

    if volume_metadata['readonly'] == 'True' and mode != 'ro':
        raise exception.InvalidVolumeAttachMode(mode=mode,
                                                volume_id=volume['id'])

    return self.volume_rpcapi.attach_volume(context,
                                            volume,
                                            instance_uuid,
                                            host_name,
                                            mountpoint,
                                            mode)
@wrap_check_policy
def detach(self, context, volume):
    """Ask the volume manager to detach the volume."""
    return self.volume_rpcapi.detach_volume(context, volume)
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
    """Ask the volume manager for driver connection info for connector."""
    LOG.debug('initialize connection for volume-id: %(volid)s, and '
              'connector: %(connector)s.', {'volid': volume['id'],
                                            'connector': connector})
    return self.volume_rpcapi.initialize_connection(context,
                                                    volume,
                                                    connector)
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
    """Tear down the connector's connection and release the reservation."""
    self.unreserve_volume(context, volume)
    return self.volume_rpcapi.terminate_connection(context,
                                                   volume,
                                                   connector,
                                                   force)
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
    """Hand the volume over to a new user/project (volume transfer)."""
    return self.volume_rpcapi.accept_transfer(context,
                                              volume,
                                              new_user,
                                              new_project)
def _create_snapshot(self, context,
                     volume, name, description,
                     force=False, metadata=None,
                     cgsnapshot_id=None):
    """Create the snapshot DB record, then kick off the backend create."""
    snapshot = self.create_snapshot_in_db(
        context, volume, name,
        description, force, metadata, cgsnapshot_id)
    self.volume_rpcapi.create_snapshot(context, volume, snapshot)
    return snapshot
def create_snapshot_in_db(self, context,
                          volume, name, description,
                          force, metadata,
                          cgsnapshot_id):
    """Validate, reserve quota for, and create a snapshot DB record.

    :param volume: source volume dict
    :param force: allow snapshotting a volume not in 'available' status
    :param metadata: snapshot metadata dict (validated)
    :param cgsnapshot_id: owning cgsnapshot, if any
    :returns: the created snapshot record (status 'creating')
    :raises InvalidVolume: migrating, replica, or unavailable source
    :raises VolumeSizeExceedsAvailableQuota, SnapshotLimitExceeded
    """
    check_policy(context, 'create_snapshot', volume)

    if volume['migration_status'] is not None:
        # Volume is migrating, wait until done
        msg = _("Snapshot cannot be created while volume is migrating.")
        raise exception.InvalidVolume(reason=msg)

    if volume['status'].startswith('replica_'):
        # Can't snapshot secondary replica
        msg = _("Snapshot of secondary replica is not allowed.")
        raise exception.InvalidVolume(reason=msg)

    if (not force) and (volume['status'] != "available"):
        msg = _("Volume %(vol_id)s status must be available, "
                "but current status is: "
                "%(vol_status)s.") % {'vol_id': volume['id'],
                                      'vol_status': volume['status']}
        raise exception.InvalidVolume(reason=msg)

    try:
        if CONF.no_snapshot_gb_quota:
            reserve_opts = {'snapshots': 1}
        else:
            reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume.get('volume_type_id'))
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            return (usages[name]['reserved'] + usages[name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG snapshot (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed).")
                # LOG.warn is deprecated; use LOG.warning for
                # consistency with _create_snapshots_in_db_reserve.
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': volume['size'],
                                  'd_consumed': _consumed(over),
                                  'd_quota': quotas[over]})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=volume['size'],
                    consumed=_consumed('gigabytes'),
                    quota=quotas['gigabytes'])
            elif 'snapshots' in over:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "snapshot (%(d_consumed)d snapshots "
                        "already consumed).")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  'd_consumed': _consumed(over)})
                raise exception.SnapshotLimitExceeded(
                    allowed=quotas[over])

    self._check_metadata_properties(metadata)

    options = {'volume_id': volume['id'],
               'cgsnapshot_id': cgsnapshot_id,
               'user_id': context.user_id,
               'project_id': context.project_id,
               'status': "creating",
               'progress': '0%',
               'volume_size': volume['size'],
               'display_name': name,
               'display_description': description,
               'volume_type_id': volume['volume_type_id'],
               'encryption_key_id': volume['encryption_key_id'],
               'metadata': metadata}

    snapshot = None
    try:
        snapshot = self.db.snapshot_create(context, options)
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Drop the half-created row if it exists; always roll the
            # reservation back.
            try:
                if snapshot:
                    self.db.snapshot_destroy(context, snapshot['id'])
            finally:
                QUOTAS.rollback(context, reservations)
    return snapshot
def create_snapshots_in_db(self, context,
                           volume_list,
                           name, description,
                           force, cgsnapshot_id):
    """Create snapshot DB records for several volumes as a group.

    Every volume is validated and the combined quota reserved before
    any record is created; on failure all created records are
    destroyed and the reservation rolled back.
    """
    snapshot_list = []
    for volume in volume_list:
        self._create_snapshot_in_db_validate(context, volume, force)

    reservations = self._create_snapshots_in_db_reserve(
        context, volume_list)

    options_list = []
    for volume in volume_list:
        options = self._create_snapshot_in_db_options(
            context, volume, name, description, cgsnapshot_id)
        options_list.append(options)

    try:
        for options in options_list:
            snapshot = self.db.snapshot_create(context, options)
            snapshot_list.append(snapshot)
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Undo any rows already created, then release the quota.
            try:
                for snap in snapshot_list:
                    self.db.snapshot_destroy(context, snap['id'])
            finally:
                QUOTAS.rollback(context, reservations)

    return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
    """Policy and status checks for snapshotting one volume.

    :raises InvalidVolume: volume is migrating, or not available and
        force is not set
    """
    check_policy(context, 'create_snapshot', volume)

    if volume['migration_status'] is not None:
        # Volume is migrating, wait until done
        msg = _("Snapshot cannot be created while volume is migrating.")
        raise exception.InvalidVolume(reason=msg)

    if ((not force) and (volume['status'] != "available")):
        msg = _("Snapshot cannot be created because volume %(vol_id)s "
                "is not available, current volume status: "
                "%(vol_status)s.") % {'vol_id': volume['id'],
                                      'vol_status': volume['status']}
        raise exception.InvalidVolume(reason=msg)
def _create_snapshots_in_db_reserve(self, context, volume_list):
    """Reserve quota for snapshotting every volume in volume_list.

    Per-volume reserve options are summed into one reservation so
    the whole group either fits in quota or fails together.

    :returns: quota reservations for the caller to commit/rollback
    :raises VolumeSizeExceedsAvailableQuota, SnapshotLimitExceeded
    """
    reserve_opts_list = []
    total_reserve_opts = {}
    try:
        for volume in volume_list:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': 1}
            else:
                reserve_opts = {'snapshots': 1,
                                'gigabytes': volume['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume.get('volume_type_id'))
            reserve_opts_list.append(reserve_opts)

        # Sum all per-volume options into one combined reservation.
        for reserve_opts in reserve_opts_list:
            for (key, value) in reserve_opts.items():
                if key not in total_reserve_opts.keys():
                    total_reserve_opts[key] = value
                else:
                    total_reserve_opts[key] = \
                        total_reserve_opts[key] + value
        reservations = QUOTAS.reserve(context, **total_reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            return (usages[name]['reserved'] + usages[name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG snapshot (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed).")
                # NOTE(review): 'volume' here is whatever the loop above
                # bound last, not necessarily the volume that exceeded
                # quota -- the reported size may be misleading; confirm.
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': volume['size'],
                                  'd_consumed': _consumed(over),
                                  'd_quota': quotas[over]})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=volume['size'],
                    consumed=_consumed('gigabytes'),
                    quota=quotas['gigabytes'])
            elif 'snapshots' in over:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "snapshot (%(d_consumed)d snapshots "
                        "already consumed).")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  'd_consumed': _consumed(over)})
                raise exception.SnapshotLimitExceeded(
                    allowed=quotas[over])

    return reservations
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
return self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
True, metadata)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
'due to invalid status. '
'Status must be available or '
'error, not %(snap_status)s.'),
{'snap_id': snapshot['id'],
'snap_status': snapshot['status']})
msg = _("Volume Snapshot status must be available or error.")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
if cgsnapshot_id:
LOG.error(_LE('Unable to delete snapshot: %s, '
'because it is part of a consistency '
'group.'), snapshot['id'])
msg = _("Snapshot %s is part of a cgsnapshot and has to be "
"deleted together with the cgsnapshot.") % snapshot['id']
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
LOG.info(_LI('Succesfully issued request to '
'delete snapshot: %s.'), snapshot['id'])
    @wrap_check_policy
    def update_snapshot(self, context, snapshot, fields):
        """Apply *fields* directly to the snapshot's DB record."""
        self.db.snapshot_update(context, snapshot['id'], fields)
    @wrap_check_policy
    def get_volume_metadata(self, context, volume):
        """Get all metadata associated with a volume."""
        rv = self.db.volume_metadata_get(context, volume['id'])
        # Copy into a plain dict so callers cannot mutate the DB result.
        return dict(rv.iteritems())
    @wrap_check_policy
    def delete_volume_metadata(self, context, volume, key):
        """Delete the given metadata item from a volume."""
        # Delegates straight to the DB layer; drivers are not notified.
        self.db.volume_metadata_delete(context, volume['id'], key)
    def _check_metadata_properties(self, metadata=None):
        """Validate metadata keys and values before they hit the database.

        Every key must be non-empty and both key and value must fit in
        255 characters.

        :raises: InvalidVolumeMetadata for a blank key,
                 InvalidVolumeMetadataSize for oversized keys/values
        """
        if not metadata:
            metadata = {}
        for k, v in metadata.iteritems():
            if len(k) == 0:
                msg = _("Metadata property key blank.")
                LOG.warn(msg)
                raise exception.InvalidVolumeMetadata(reason=msg)
            if len(k) > 255:
                msg = _("Metadata property key greater than 255 characters.")
                LOG.warn(msg)
                raise exception.InvalidVolumeMetadataSize(reason=msg)
            if len(v) > 255:
                # NOTE(review): assumes values are strings; a value without
                # len() (e.g. None) would raise TypeError here -- confirm
                # callers always pass string values.
                msg = _("Metadata property value greater than 255 characters.")
                LOG.warn(msg)
                raise exception.InvalidVolumeMetadataSize(reason=msg)
    @wrap_check_policy
    def update_volume_metadata(self, context, volume, metadata, delete=False):
        """Updates or creates volume metadata.
        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.

        :returns: the metadata dict as stored by the DB layer
        """
        if delete:
            _metadata = metadata
        else:
            # Merge the new items over a copy of the existing metadata.
            orig_meta = self.get_volume_metadata(context, volume)
            _metadata = orig_meta.copy()
            _metadata.update(metadata)
        self._check_metadata_properties(_metadata)
        db_meta = self.db.volume_metadata_update(context, volume['id'],
                                                 _metadata, delete)
        # TODO(jdg): Implement an RPC call for drivers that may use this info
        return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
    @wrap_check_policy
    def get_volume_admin_metadata(self, context, volume):
        """Get all administration metadata associated with a volume."""
        rv = self.db.volume_admin_metadata_get(context, volume['id'])
        # Copy into a plain dict so callers cannot mutate the DB result.
        return dict(rv.iteritems())
    @wrap_check_policy
    def delete_volume_admin_metadata(self, context, volume, key):
        """Delete the given administration metadata item from a volume."""
        # Admin metadata is policy-protected via the decorator above.
        self.db.volume_admin_metadata_delete(context, volume['id'], key)
    @wrap_check_policy
    def update_volume_admin_metadata(self, context, volume, metadata,
                                     delete=False):
        """Updates or creates volume administration metadata.
        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.

        :returns: the merged metadata dict that was written
        """
        if delete:
            _metadata = metadata
        else:
            # Merge the new items over a copy of the existing metadata.
            orig_meta = self.get_volume_admin_metadata(context, volume)
            _metadata = orig_meta.copy()
            _metadata.update(metadata)
        self._check_metadata_properties(_metadata)
        self.db.volume_admin_metadata_update(context, volume['id'],
                                             _metadata, delete)
        # TODO(jdg): Implement an RPC call for drivers that may use this info
        return _metadata
    def get_snapshot_metadata(self, context, snapshot):
        """Get all metadata associated with a snapshot."""
        rv = self.db.snapshot_metadata_get(context, snapshot['id'])
        # Copy into a plain dict so callers cannot mutate the DB result.
        return dict(rv.iteritems())
    def delete_snapshot_metadata(self, context, snapshot, key):
        """Delete the given metadata item from a snapshot."""
        # Delegates straight to the DB layer; drivers are not notified.
        self.db.snapshot_metadata_delete(context, snapshot['id'], key)
    def update_snapshot_metadata(self, context,
                                 snapshot, metadata,
                                 delete=False):
        """Updates or creates snapshot metadata.
        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.

        :returns: the metadata dict as stored by the DB layer
        """
        if delete:
            _metadata = metadata
        else:
            # Merge the new items over a copy of the existing metadata.
            orig_meta = self.get_snapshot_metadata(context, snapshot)
            _metadata = orig_meta.copy()
            _metadata.update(metadata)
        self._check_metadata_properties(_metadata)
        # NOTE(review): the delete flag passed to the DB layer is hard-coded
        # to True. This is behavior-equivalent because _metadata already
        # holds the full merged set when delete was False, but it differs
        # from update_volume_metadata which forwards the caller's flag --
        # confirm this asymmetry is intentional.
        db_meta = self.db.snapshot_metadata_update(context,
                                                   snapshot['id'],
                                                   _metadata,
                                                   True)
        # TODO(jdg): Implement an RPC call for drivers that may use this info
        return db_meta
def get_snapshot_metadata_value(self, snapshot, key):
pass
    def get_volumes_image_metadata(self, context):
        """Return Glance image metadata for every volume, keyed by volume id.

        :returns: mapping of volume_id -> {key: value} (a defaultdict, so
                  missing volumes yield an empty dict)
        """
        check_policy(context, 'get_volumes_image_metadata')
        db_data = self.db.volume_glance_metadata_get_all(context)
        results = collections.defaultdict(dict)
        for meta_entry in db_data:
            results[meta_entry['volume_id']].update({meta_entry['key']:
                                                     meta_entry['value']})
        return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be '
'available or in-use, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
    @wrap_check_policy
    def copy_volume_to_image(self, context, volume, metadata, force):
        """Create a new image from the specified volume.

        :param metadata: image metadata sent to Glance; custom (non-core)
            properties inherited from the volume's source image are merged in
        :param force: allow uploading an 'in-use' volume
        :returns: dict summarizing the upload and the new image
        """
        self._check_volume_availability(volume, force)
        glance_core_properties = CONF.glance_core_properties
        if glance_core_properties:
            try:
                # Carry the volume's custom image properties (anything not
                # in the configured Glance core set) over to the new image.
                volume_image_metadata = self.get_volume_image_metadata(context,
                                                                       volume)
                custom_property_set = (set(volume_image_metadata).difference
                                       (set(glance_core_properties)))
                if custom_property_set:
                    metadata.update(dict(properties=dict((custom_property,
                                                          volume_image_metadata
                                                          [custom_property])
                                                         for custom_property
                                                         in custom_property_set)))
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
        recv_metadata = self.image_service.create(context, metadata)
        self.update(context, volume, {'status': 'uploading'})
        # The backend streams the volume data to Glance asynchronously.
        self.volume_rpcapi.copy_volume_to_image(context,
                                                volume,
                                                recv_metadata)
        # Echo back enough detail for the API layer to build its response.
        response = {"id": volume['id'],
                    "updated_at": volume['updated_at'],
                    "status": 'uploading',
                    "display_description": volume['display_description'],
                    "size": volume['size'],
                    "volume_type": volume['volume_type'],
                    "image_id": recv_metadata['id'],
                    "container_format": recv_metadata['container_format'],
                    "disk_format": recv_metadata['disk_format'],
                    "image_name": recv_metadata.get('name', None)}
        return response
    @wrap_check_policy
    def extend(self, context, volume, new_size):
        """Grow an 'available' volume to *new_size* gigabytes.

        Reserves quota for the size delta, then hands the work to the
        volume manager, which commits or rolls back the reservation.

        :raises: InvalidVolume if the volume is not available,
                 InvalidInput if the new size does not grow the volume,
                 VolumeSizeExceedsAvailableQuota on quota exhaustion
        """
        if volume['status'] != 'available':
            msg = _('Volume %(vol_id)s status must be available '
                    'to extend, but current status is: '
                    '%(vol_status)s.') % {'vol_id': volume['id'],
                                          'vol_status': volume['status']}
            raise exception.InvalidVolume(reason=msg)
        size_increase = (int(new_size)) - volume['size']
        if size_increase <= 0:
            msg = (_("New size for extend must be greater "
                     "than current size. (current: %(size)s, "
                     "extended: %(new_size)s).") % {'new_size': new_size,
                                                    'size': volume['size']})
            raise exception.InvalidInput(reason=msg)
        try:
            # Only the delta counts against the gigabytes quota.
            reserve_opts = {'gigabytes': size_increase}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        volume.get('volume_type_id'))
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as exc:
            usages = exc.kwargs['usages']
            quotas = exc.kwargs['quotas']
            def _consumed(name):
                # Quota already taken = committed usage plus pending
                # reservations.
                return (usages[name]['reserved'] + usages[name]['in_use'])
            msg = _("Quota exceeded for %(s_pid)s, tried to extend volume by "
                    "%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG already "
                    "consumed).")
            LOG.error(msg % {'s_pid': context.project_id,
                             's_size': size_increase,
                             'd_consumed': _consumed('gigabytes'),
                             'd_quota': quotas['gigabytes']})
            raise exception.VolumeSizeExceedsAvailableQuota(
                requested=size_increase,
                consumed=_consumed('gigabytes'),
                quota=quotas['gigabytes'])
        self.update(context, volume, {'status': 'extending'})
        self.volume_rpcapi.extend_volume(context, volume, new_size,
                                         reservations)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be available or in-use, '
'but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume %s is already part of an active "
"migration.") % volume['id']
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
msg = _("Volume %s must not have snapshots.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle non-replicated volumes for now
rep_status = volume['replication_status']
if rep_status is not None and rep_status != 'disabled':
msg = _("Volume %s must not be replicated.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume %s must not be part of a consistency "
"group.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
found = False
for service in services:
svc_host = volume_utils.extract_host(host, 'backend')
if utils.service_is_up(service) and service['host'] == svc_host:
found = True
if not found:
msg = _('No available service named %s') % host
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different '
'than the current host.')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
    @wrap_check_policy
    def migrate_volume_completion(self, context, volume, new_volume, error):
        """Finish (or roll back) a volume migration.

        :param error: True when the migration failed and should be
            reverted rather than committed
        :returns: the volume id the caller (Nova) should use from now on
        :raises: InvalidVolume when only one side is mid-migration or the
            destination does not belong to this source
        """
        # This is a volume swap initiated by Nova, not Cinder. Nova expects
        # us to return the new_volume_id.
        if not (volume['migration_status'] or new_volume['migration_status']):
            return new_volume['id']
        if not volume['migration_status']:
            msg = _('Source volume not mid-migration.')
            raise exception.InvalidVolume(reason=msg)
        if not new_volume['migration_status']:
            msg = _('Destination volume not mid-migration.')
            raise exception.InvalidVolume(reason=msg)
        # The destination's migration_status encodes which source volume it
        # belongs to; refuse mismatched pairs.
        expected_status = 'target:%s' % volume['id']
        if not new_volume['migration_status'] == expected_status:
            msg = (_('Destination has migration_status %(stat)s, expected '
                     '%(exp)s.') % {'stat': new_volume['migration_status'],
                                    'exp': expected_status})
            raise exception.InvalidVolume(reason=msg)
        return self.volume_rpcapi.migrate_volume_completion(context, volume,
                                                            new_volume, error)
    @wrap_check_policy
    def update_readonly_flag(self, context, volume, flag):
        """Persist the readonly flag in the volume's admin metadata.

        Only 'available' volumes may be toggled; the flag is stored as a
        string under the 'readonly' key.
        """
        if volume['status'] != 'available':
            msg = _('Volume %(vol_id)s status must be available '
                    'to update readonly flag, but current status is: '
                    '%(vol_status)s.') % {'vol_id': volume['id'],
                                          'vol_status': volume['status']}
            raise exception.InvalidVolume(reason=msg)
        # Elevated context: admin metadata is not writable by regular users.
        self.update_volume_admin_metadata(context.elevated(), volume,
                                          {'readonly': six.text_type(flag)})
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status: '
'%(vol_status)s on volume: %(vol_id)s. Volume status '
'must be available or '
'in-use.') % {'vol_status': volume['status'],
'vol_id': volume['id']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = _('New volume_type same as original: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements.')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volume: %s.') % volume['id']
raise exception.InvalidInput(reason=msg)
# We're checking here in so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
    def manage_existing(self, context, host, ref, name=None, description=None,
                        volume_type=None, metadata=None,
                        availability_zone=None, bootable=False):
        """Bring an existing backend storage object under Cinder management.

        :param host: backend host that owns the object
        :param ref: driver-specific reference identifying the object
        :returns: the newly created volume DB record (status 'creating')
        """
        if availability_zone is None:
            # Default the AZ to that of the service owning the host.
            elevated = context.elevated()
            try:
                svc_host = volume_utils.extract_host(host, 'backend')
                service = self.db.service_get_by_host_and_topic(
                    elevated, svc_host, CONF.volume_topic)
            except exception.ServiceNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Unable to find service for given host.'))
            availability_zone = service.get('availability_zone')
        volume_type_id = volume_type['id'] if volume_type else None
        volume_properties = {
            # Size 0 placeholder -- presumably the real size is filled in by
            # the driver once the object is examined; confirm in the manager.
            'size': 0,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': 'detached',
            # Rename these to the internal name.
            'display_description': description,
            'display_name': name,
            'host': host,
            'availability_zone': availability_zone,
            'volume_type_id': volume_type_id,
            'metadata': metadata,
            'bootable': bootable
        }
        # Call the scheduler to ensure that the host exists and that it can
        # accept the volume
        volume = self.db.volume_create(context, volume_properties)
        request_spec = {'volume_properties': volume,
                        'volume_type': volume_type,
                        'volume_id': volume['id'],
                        'ref': ref}
        self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
                                              volume['id'],
                                              request_spec=request_spec)
        return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    # NOTE: the docstring above was previously a dead bare-string statement
    # inside __init__; it is now a proper class docstring.
    def __init__(self):
        super(HostAPI, self).__init__()
    def set_host_enabled(self, context, host, enabled):
        """Sets the specified host's ability to accept new volumes."""
        raise NotImplementedError()
    def get_host_uptime(self, context, host):
        """Returns the result of calling "uptime" on the target host."""
        raise NotImplementedError()
    def host_power_action(self, context, host, action):
        """Perform a power action (e.g. reboot/shutdown) on the target host."""
        raise NotImplementedError()
    def set_host_maintenance(self, context, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        volume evacuation.
        """
        raise NotImplementedError()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import read_int, write_int, write_with_length, UTF8Deserializer
class TaskContext(object):
    """
    Contextual information about a task which can be read or mutated during
    execution. To access the TaskContext for a running task, use:
    :meth:`TaskContext.get`.
    """
    # Singleton instance; one TaskContext per worker process.
    _taskContext = None
    # Per-task fields, populated by the worker before user code runs.
    _attemptNumber = None
    _partitionId = None
    _stageId = None
    _taskAttemptId = None
    _localProperties = None
    _resources = None
    def __new__(cls):
        """Even if users construct TaskContext instead of using get, give them the singleton."""
        taskContext = cls._taskContext
        if taskContext is not None:
            return taskContext
        cls._taskContext = taskContext = object.__new__(cls)
        return taskContext
    @classmethod
    def _getOrCreate(cls):
        """Internal function to get or create global TaskContext."""
        if cls._taskContext is None:
            cls._taskContext = TaskContext()
        return cls._taskContext
    @classmethod
    def _setTaskContext(cls, taskContext):
        cls._taskContext = taskContext
    @classmethod
    def get(cls):
        """
        Return the currently active TaskContext. This can be called inside of
        user functions to access contextual information about running tasks.
        Notes
        -----
        Must be called on the worker, not the driver. Returns None if not initialized.
        """
        return cls._taskContext
    def stageId(self):
        """The ID of the stage that this task belong to."""
        return self._stageId
    def partitionId(self):
        """
        The ID of the RDD partition that is computed by this task.
        """
        return self._partitionId
    def attemptNumber(self):
        """
        How many times this task has been attempted. The first task attempt will be assigned
        attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
        """
        # (The docstring above previously opened with four quotes, leaving a
        # stray '"' as its first character.)
        return self._attemptNumber
    def taskAttemptId(self):
        """
        An ID that is unique to this task attempt (within the same SparkContext, no two task
        attempts will share the same attempt ID). This is roughly equivalent to Hadoop's
        TaskAttemptID.
        """
        return self._taskAttemptId
    def getLocalProperty(self, key):
        """
        Get a local property set upstream in the driver, or None if it is missing.
        """
        return self._localProperties.get(key, None)
    def resources(self):
        """
        Resources allocated to the task. The key is the resource name and the value is information
        about the resource.
        """
        return self._resources
# Opcodes for the coordinator protocol spoken over the local socket in
# _load_from_socket: request a plain barrier() or an allGather().
BARRIER_FUNCTION = 1
ALL_GATHER_FUNCTION = 2
def _load_from_socket(port, auth_secret, function, all_gather_message=None):
    """
    Load data from a given socket, this is a blocking method thus only return when the socket
    connection has been closed.

    :param function: BARRIER_FUNCTION or ALL_GATHER_FUNCTION opcode
    :param all_gather_message: message payload, required for ALL_GATHER_FUNCTION
    :return: list of UTF-8 strings read back from the coordinator
    """
    (sockfile, sock) = local_connect_and_auth(port, auth_secret)
    try:
        # The call may block forever, so no timeout
        sock.settimeout(None)
        if function == BARRIER_FUNCTION:
            # Make a barrier() function call.
            write_int(function, sockfile)
        elif function == ALL_GATHER_FUNCTION:
            # Make a all_gather() function call.
            write_int(function, sockfile)
            write_with_length(all_gather_message.encode("utf-8"), sockfile)
        else:
            raise ValueError("Unrecognized function type")
        sockfile.flush()
        # Collect result. (The count variable used to shadow the builtin
        # `len`.)
        num_items = read_int(sockfile)
        deserializer = UTF8Deserializer()
        res = [deserializer.loads(sockfile) for _ in range(num_items)]
    finally:
        # Release resources even if the exchange failed part-way.
        sockfile.close()
        sock.close()
    return res
class BarrierTaskContext(TaskContext):
    """
    A :class:`TaskContext` with extra contextual info and tooling for tasks in a barrier stage.
    Use :func:`BarrierTaskContext.get` to obtain the barrier context for a running barrier task.
    .. versionadded:: 2.4.0
    Notes
    -----
    This API is experimental
    """
    # Coordinator endpoint; set by _initialize() before any barrier call.
    _port = None
    _secret = None
    @classmethod
    def _getOrCreate(cls):
        """
        Internal function to get or create global BarrierTaskContext. We need to make sure
        BarrierTaskContext is returned from here because it is needed in python worker reuse
        scenario, see SPARK-25921 for more details.
        """
        if not isinstance(cls._taskContext, BarrierTaskContext):
            cls._taskContext = object.__new__(cls)
        return cls._taskContext
    @classmethod
    def get(cls):
        """
        Return the currently active :class:`BarrierTaskContext`.
        This can be called inside of user functions to access contextual information about
        running tasks.
        Notes
        -----
        Must be called on the worker, not the driver. Returns None if not initialized.
        An Exception will raise if it is not in a barrier stage.
        This API is experimental
        """
        if not isinstance(cls._taskContext, BarrierTaskContext):
            raise RuntimeError('It is not in a barrier stage')
        return cls._taskContext
    @classmethod
    def _initialize(cls, port, secret):
        """
        Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called
        after BarrierTaskContext is initialized.
        """
        cls._port = port
        cls._secret = secret
    def barrier(self):
        """
        Sets a global barrier and waits until all tasks in this stage hit this barrier.
        Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks
        in the same stage have reached this routine.
        .. versionadded:: 2.4.0
        .. warning:: In a barrier stage, each task much have the same number of `barrier()`
            calls, in all possible code branches.
            Otherwise, you may get the job hanging or a SparkException after timeout.
        Notes
        -----
        This API is experimental
        """
        if self._port is None or self._secret is None:
            raise RuntimeError("Not supported to call barrier() before initialize " +
                               "BarrierTaskContext.")
        else:
            _load_from_socket(self._port, self._secret, BARRIER_FUNCTION)
    def allGather(self, message=""):
        """
        This function blocks until all tasks in the same stage have reached this routine.
        Each task passes in a message and returns with a list of all the messages passed in
        by each of those tasks.
        .. versionadded:: 3.0.0
        .. warning:: In a barrier stage, each task much have the same number of `allGather()`
            calls, in all possible code branches.
            Otherwise, you may get the job hanging or a SparkException after timeout.
        Notes
        -----
        This API is experimental
        """
        if not isinstance(message, str):
            raise TypeError("Argument `message` must be of type `str`")
        elif self._port is None or self._secret is None:
            # Fixed copy-paste: the message previously said "barrier()".
            raise RuntimeError("Not supported to call allGather() before initialize " +
                               "BarrierTaskContext.")
        else:
            return _load_from_socket(self._port, self._secret, ALL_GATHER_FUNCTION, message)
    def getTaskInfos(self):
        """
        Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
        ordered by partition ID.
        .. versionadded:: 2.4.0
        Notes
        -----
        This API is experimental
        """
        if self._port is None or self._secret is None:
            raise RuntimeError("Not supported to call getTaskInfos() before initialize " +
                               "BarrierTaskContext.")
        else:
            addresses = self._localProperties.get("addresses", "")
            return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
class BarrierTaskInfo(object):
    """
    Carries all task infos of a barrier task.
    .. versionadded:: 2.4.0
    Attributes
    ----------
    address : str
        The IPv4 address (host:port) of the executor that the barrier task is running on
    Notes
    -----
    This API is experimental
    """
    def __init__(self, address):
        self.address = address
    def __repr__(self):
        # Added for debuggability; no effect on existing behavior.
        return "BarrierTaskInfo(address=%r)" % (self.address,)
|
|
import socket as _orig_sock
from tests import LimitedTestCase, skip_with_pyevent, main, skipped
from eventlet import event
from eventlet import greenio
from eventlet import debug
from eventlet.green import socket
from eventlet.green import time
from eventlet.green.socket import GreenSSLObject
import errno
import eventlet
import os
import sys
import array
def bufsized(sock, size=1):
    """ Shrink (or set) both the send and receive buffers of a socket.

    Useful for testing trampoline behavior, since tiny buffers force
    writes to block. The same socket is returned so calls can be chained.
    >>> import socket
    >>> sock = bufsized(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    """
    for option in (socket.SO_SNDBUF, socket.SO_RCVBUF):
        sock.setsockopt(socket.SOL_SOCKET, option, size)
    return sock
def min_buf_size():
    """Return the minimum send-buffer size that the platform supports.

    The kernel may silently clamp SO_SNDBUF requests to a floor; asking
    for 1 byte and reading the option back reveals that floor. The probe
    socket is now closed instead of being leaked.
    """
    test_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        test_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
        return test_sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
    finally:
        test_sock.close()
class TestGreenIo(LimitedTestCase):
    def test_connect_timeout(self):
        """connect() to an unroutable address must time out, not hang."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.1)
        gs = greenio.GreenSocket(s)
        try:
            # 192.0.2.0/24 (TEST-NET) is reserved, so nothing answers.
            gs.connect(('192.0.2.1', 80))
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
        except socket.error, e:
            # unreachable is also a valid outcome
            if not e[0] in (errno.EHOSTUNREACH, errno.ENETUNREACH):
                raise
    def test_accept_timeout(self):
        """accept() with no pending connection must raise socket.timeout."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))
        s.listen(50)
        s.settimeout(0.1)
        gs = greenio.GreenSocket(s)
        try:
            gs.accept()
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
    def test_connect_ex_timeout(self):
        """connect_ex() reports a timeout as EAGAIN instead of raising."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.1)
        gs = greenio.GreenSocket(s)
        e = gs.connect_ex(('192.0.2.1', 80))
        # An unreachable-network error is also acceptable on some platforms.
        if not e in (errno.EHOSTUNREACH, errno.ENETUNREACH):
            self.assertEquals(e, errno.EAGAIN)
    def test_recv_timeout(self):
        """recv() on a connection with no data must honor the timeout."""
        listener = greenio.GreenSocket(socket.socket())
        listener.bind(('', 0))
        listener.listen(50)
        evt = event.Event()
        def server():
            # accept the connection in another greenlet
            sock, addr = listener.accept()
            evt.wait()
        gt = eventlet.spawn(server)
        addr = listener.getsockname()
        client = greenio.GreenSocket(socket.socket())
        client.settimeout(0.1)
        client.connect(addr)
        try:
            r = client.recv(8192)
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
        # Unblock the server greenlet so it exits cleanly.
        evt.send()
        gt.wait()
    def test_recvfrom_timeout(self):
        """recvfrom() on a silent UDP socket must honor the timeout."""
        gs = greenio.GreenSocket(
            socket.socket(socket.AF_INET, socket.SOCK_DGRAM))
        gs.settimeout(.1)
        gs.bind(('', 0))
        try:
            gs.recvfrom(8192)
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
    def test_recvfrom_into_timeout(self):
        """recvfrom_into() on a silent UDP socket must honor the timeout."""
        # Python 2 buffer object wrapping an empty byte array.
        buf = buffer(array.array('B'))
        gs = greenio.GreenSocket(
            socket.socket(socket.AF_INET, socket.SOCK_DGRAM))
        gs.settimeout(.1)
        gs.bind(('', 0))
        try:
            gs.recvfrom_into(buf)
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
    def test_recv_into_timeout(self):
        """recv_into() on a connection with no data must honor the timeout."""
        # Python 2 buffer object wrapping an empty byte array.
        buf = buffer(array.array('B'))
        listener = greenio.GreenSocket(socket.socket())
        listener.bind(('', 0))
        listener.listen(50)
        evt = event.Event()
        def server():
            # accept the connection in another greenlet
            sock, addr = listener.accept()
            evt.wait()
        gt = eventlet.spawn(server)
        addr = listener.getsockname()
        client = greenio.GreenSocket(socket.socket())
        client.settimeout(0.1)
        client.connect(addr)
        try:
            r = client.recv_into(buf)
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
        # Unblock the server greenlet so it exits cleanly.
        evt.send()
        gt.wait()
    def test_send_timeout(self):
        """send() must time out once the peer's tiny buffers fill up."""
        listener = bufsized(eventlet.listen(('', 0)))
        evt = event.Event()
        def server():
            # accept the connection in another greenlet
            sock, addr = listener.accept()
            sock = bufsized(sock)
            evt.wait()
        gt = eventlet.spawn(server)
        addr = listener.getsockname()
        client = bufsized(greenio.GreenSocket(socket.socket()))
        client.connect(addr)
        try:
            client.settimeout(0.00001)
            msg = "A"*(100000)  # large enough number to overwhelm most buffers
            total_sent = 0
            # want to exceed the size of the OS buffer so it'll block in a
            # single send
            for x in range(10):
                total_sent += client.send(msg)
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
        # Unblock the server greenlet so it exits cleanly.
        evt.send()
        gt.wait()
    def test_sendall_timeout(self):
        """sendall() of a huge payload must honor the socket timeout."""
        listener = greenio.GreenSocket(socket.socket())
        listener.bind(('', 0))
        listener.listen(50)
        evt = event.Event()
        def server():
            # accept the connection in another greenlet
            sock, addr = listener.accept()
            evt.wait()
        gt = eventlet.spawn(server)
        addr = listener.getsockname()
        client = greenio.GreenSocket(socket.socket())
        client.settimeout(0.1)
        client.connect(addr)
        try:
            msg = "A"*(8*1024*1024)
            # want to exceed the size of the OS buffer so it'll block
            client.sendall(msg)
            self.fail("socket.timeout not raised")
        except socket.timeout, e:
            self.assert_(hasattr(e, 'args'))
            self.assertEqual(e.args[0], 'timed out')
        # Unblock the server greenlet so it exits cleanly.
        evt.send()
        gt.wait()
    def test_close_with_makefile(self):
        """A makefile() file object must be independent of its socket."""
        def accept_close_early(listener):
            # verify that the makefile and the socket are truly independent
            # by closing the socket prior to using the made file
            try:
                conn, addr = listener.accept()
                fd = conn.makefile()
                conn.close()
                fd.write('hello\n')
                fd.close()
                # socket._fileobjects are odd: writes don't check
                # whether the socket is closed or not, and you get an
                # AttributeError during flush if it is closed
                fd.write('a')
                self.assertRaises(Exception, fd.flush)
                self.assertRaises(socket.error, conn.send, 'b')
            finally:
                listener.close()
        def accept_close_late(listener):
            # verify that the makefile and the socket are truly independent
            # by closing the made file and then sending a character
            try:
                conn, addr = listener.accept()
                fd = conn.makefile()
                fd.write('hello')
                fd.close()
                conn.send('\n')
                conn.close()
                fd.write('a')
                self.assertRaises(Exception, fd.flush)
                self.assertRaises(socket.error, conn.send, 'b')
            finally:
                listener.close()
        def did_it_work(server):
            # client side: the complete 'hello\n' must arrive regardless of
            # which of socket/file the server closed first
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect(('127.0.0.1', server.getsockname()[1]))
            fd = client.makefile()
            client.close()
            assert fd.readline() == 'hello\n'
            assert fd.read() == ''
            fd.close()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
        server.bind(('0.0.0.0', 0))
        server.listen(50)
        killer = eventlet.spawn(accept_close_early, server)
        did_it_work(server)
        killer.wait()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
        server.bind(('0.0.0.0', 0))
        server.listen(50)
        killer = eventlet.spawn(accept_close_late, server)
        did_it_work(server)
        killer.wait()
    def test_del_closes_socket(self):
        """Closing the last file-object reference must close the socket."""
        def accept_once(listener):
            # delete/overwrite the original conn
            # object, only keeping the file object around
            # closing the file object should close everything
            try:
                conn, addr = listener.accept()
                conn = conn.makefile()
                conn.write('hello\n')
                conn.close()
                conn.write('a')
                self.assertRaises(Exception, conn.flush)
            finally:
                listener.close()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
        server.bind(('127.0.0.1', 0))
        server.listen(50)
        killer = eventlet.spawn(accept_once, server)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', server.getsockname()[1]))
        fd = client.makefile()
        client.close()
        # EOF after 'hello\n' proves the server side really closed the
        # connection when its file-object wrapper was closed
        assert fd.read() == 'hello\n'
        assert fd.read() == ''
        killer.wait()
def test_full_duplex(self):
large_data = '*' * 10 * min_buf_size()
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
listener.bind(('127.0.0.1', 0))
listener.listen(50)
bufsized(listener)
def send_large(sock):
sock.sendall(large_data)
def read_large(sock):
result = sock.recv(len(large_data))
expected = 'hello world'
while len(result) < len(large_data):
result += sock.recv(len(large_data))
self.assertEquals(result, large_data)
def server():
(sock, addr) = listener.accept()
sock = bufsized(sock)
send_large_coro = eventlet.spawn(send_large, sock)
eventlet.sleep(0)
result = sock.recv(10)
expected = 'hello world'
while len(result) < len(expected):
result += sock.recv(10)
self.assertEquals(result, expected)
send_large_coro.wait()
server_evt = eventlet.spawn(server)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', listener.getsockname()[1]))
bufsized(client)
large_evt = eventlet.spawn(read_large, client)
eventlet.sleep(0)
client.sendall('hello world')
server_evt.wait()
large_evt.wait()
client.close()
def test_sendall(self):
# test adapted from Marcus Cavanaugh's email
# it may legitimately take a while, but will eventually complete
self.timer.cancel()
second_bytes = 10
def test_sendall_impl(many_bytes):
bufsize = max(many_bytes/15, 2)
def sender(listener):
(sock, addr) = listener.accept()
sock = bufsized(sock, size=bufsize)
sock.sendall('x'*many_bytes)
sock.sendall('y'*second_bytes)
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
listener.bind(("", 0))
listener.listen(50)
sender_coro = eventlet.spawn(sender, listener)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', listener.getsockname()[1]))
bufsized(client, size=bufsize)
total = 0
while total < many_bytes:
data = client.recv(min(many_bytes - total, many_bytes/10))
if data == '':
break
total += len(data)
total2 = 0
while total < second_bytes:
data = client.recv(second_bytes)
if data == '':
break
total2 += len(data)
sender_coro.wait()
client.close()
for bytes in (1000, 10000, 100000, 1000000):
test_sendall_impl(bytes)
def test_wrap_socket(self):
try:
import ssl
except ImportError:
pass # pre-2.6
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 0))
sock.listen(50)
ssl_sock = ssl.wrap_socket(sock)
    def test_timeout_and_final_write(self):
        # This test verifies that a write on a socket that we've
        # stopped listening for doesn't result in an incorrect switch
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
        server.bind(('127.0.0.1', 0))
        server.listen(50)
        bound_port = server.getsockname()[1]
        def sender(evt):
            s2, addr = server.accept()
            wrap_wfile = s2.makefile()
            # sleep past the client's 0.01s Timeout so the client has
            # already given up reading by the time this write happens
            eventlet.sleep(0.02)
            wrap_wfile.write('hi')
            s2.close()
            evt.send('sent via event')
        from eventlet import event
        evt = event.Event()
        eventlet.spawn(sender, evt)
        eventlet.sleep(0) # lets the socket enter accept mode, which
                          # is necessary for connect to succeed on windows
        try:
            # try and get some data off of this pipe
            # but bail before any is sent
            eventlet.Timeout(0.01)
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect(('127.0.0.1', bound_port))
            wrap_rfile = client.makefile()
            _c = wrap_rfile.read(1)
            self.fail()
        except eventlet.TimeoutError:
            pass
        # the sender greenlet must still complete its write cleanly
        result = evt.wait()
        self.assertEquals(result, 'sent via event')
        server.close()
        client.close()
    def test_pipe_read(self):
        # ensure that 'readline' works properly on GreenPipes when data is not
        # immediately available (fd is nonblocking, was raising EAGAIN)
        # also ensures that readline() terminates on '\n' and '\r\n'
        r, w = os.pipe()
        r = os.fdopen(r)
        w = os.fdopen(w, 'w')
        r = greenio.GreenPipe(r)
        w = greenio.GreenPipe(w)
        def writer():
            # delay so the reader blocks first instead of finding data ready
            eventlet.sleep(.1)
            w.write('line\n')
            w.flush()
            w.write('line\r\n')
            w.flush()
        gt = eventlet.spawn(writer)
        eventlet.sleep(0)
        line = r.readline()
        self.assertEquals(line, 'line\n')
        line = r.readline()
        self.assertEquals(line, 'line\r\n')
        gt.wait()
class TestGreenIoLong(LimitedTestCase):
    """Longer-running green-socket tests (multiple concurrent readers)."""
    TEST_TIMEOUT=10  # the test here might take a while depending on the OS
    @skip_with_pyevent
    def test_multiple_readers(self, clibufsize=False):
        recvsize = 2 * min_buf_size()
        sendsize = 10 * recvsize
        # test that we can have multiple coroutines reading
        # from the same fd. We make no guarantees about which one gets which
        # bytes, but they should both get at least some
        def reader(sock, results):
            while True:
                data = sock.recv(recvsize)
                if data == '':
                    break
                results.append(data)
        results1 = []
        results2 = []
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
        listener.bind(('127.0.0.1', 0))
        listener.listen(50)
        def server():
            (sock, addr) = listener.accept()
            sock = bufsized(sock)
            try:
                # two readers race over the same socket until EOF
                c1 = eventlet.spawn(reader, sock, results1)
                c2 = eventlet.spawn(reader, sock, results2)
                try:
                    c1.wait()
                    c2.wait()
                finally:
                    c1.kill()
                    c2.kill()
            finally:
                sock.close()
        server_coro = eventlet.spawn(server)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', listener.getsockname()[1]))
        if clibufsize:
            bufsized(client, size=sendsize)
        else:
            bufsized(client)
        client.sendall('*' * sendsize)
        client.close()
        server_coro.wait()
        listener.close()
        # both coroutines must have received at least part of the stream
        self.assert_(len(results1) > 0)
        self.assert_(len(results2) > 0)
    @skipped # by rdw because it fails but it's not clear how to make it pass
    @skip_with_pyevent
    def test_multiple_readers2(self):
        # same as above but with the client buffer forced to sendsize
        self.test_multiple_readers(clibufsize=True)
class TestGreenIoStarvation(LimitedTestCase):
    """Checks that concurrent server greenlets are scheduled fairly."""
    # fixme: this doesn't succeed, because of eventlet's predetermined
    # ordering. two processes, one with server, one with client eventlets
    # might be more reliable?
    TEST_TIMEOUT=300 # the test here might take a while depending on the OS
    @skipped # by rdw, because it fails but it's not clear how to make it pass
    @skip_with_pyevent
    def test_server_starvation(self, sendloops=15):
        """Five servers fed by five forked clients must overlap in time."""
        recvsize = 2 * min_buf_size()
        sendsize = 10000 * recvsize
        results = [[] for i in xrange(5)]
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
        listener.bind(('127.0.0.1', 0))
        port = listener.getsockname()[1]
        listener.listen(50)
        base_time = time.time()
        def server(my_results):
            # records [total_bytes, (first_byte_time, eof_time)] per server
            (sock, addr) = listener.accept()
            datasize = 0
            t1 = None
            t2 = None
            try:
                while True:
                    data = sock.recv(recvsize)
                    if not t1:
                        t1 = time.time() - base_time
                    if data == '':
                        t2 = time.time() - base_time
                        my_results.append(datasize)
                        my_results.append((t1,t2))
                        break
                    datasize += len(data)
            finally:
                sock.close()
        def client():
            # fork a real OS process using the original (non-green) socket
            # so client I/O is not cooperatively scheduled with the servers
            pid = os.fork()
            if pid:
                return pid
            client = _orig_sock.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect(('127.0.0.1', port))
            bufsized(client, size=sendsize)
            for i in range(sendloops):
                client.sendall('*' * sendsize)
            client.close()
            os._exit(0)
        clients = []
        servers = []
        for r in results:
            servers.append(eventlet.spawn(server, r))
        for r in results:
            clients.append(client())
        for s in servers:
            s.wait()
        for c in clients:
            os.waitpid(c, 0)
        listener.close()
        # now test that all of the server receive intervals overlap, and
        # that there were no errors.
        for r in results:
            assert len(r) == 2, "length is %d not 2!: %s\n%s" % (len(r), r, results)
            assert r[0] == sendsize * sendloops
            assert len(r[1]) == 2
            assert r[1][0] is not None
            assert r[1][1] is not None
        starttimes = sorted(r[1][0] for r in results)
        endtimes = sorted(r[1][1] for r in results)
        runlengths = sorted(r[1][1] - r[1][0] for r in results)
        # assert that the last task started before the first task ended
        # (our no-starvation condition)
        assert starttimes[-1] < endtimes[0], "Not overlapping: starts %s ends %s" % (starttimes, endtimes)
        maxstartdiff = starttimes[-1] - starttimes[0]
        assert maxstartdiff * 2 < runlengths[0], "Largest difference in starting times more than twice the shortest running time!"
        assert runlengths[0] * 2 > runlengths[-1], "Longest runtime more than twice as long as shortest!"
# Allow running this test module directly.
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.db import exception as db_exc
import pecan
import yaml
from solum.api.controllers.v1.datamodel import plan as planmodel
from solum.api.controllers.v1 import plan
from solum.api.handlers import plan_handler
from solum.common import exception
from solum import objects
from solum.tests import base
from solum.tests import fakes
class TestPlanModuleFunctions(base.BaseTestCase):
    """Tests for module-level helpers in solum.api.controllers.v1.plan."""

    @mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
    def test_yaml_content(self, mock_req):
        # yaml_content() must embed the canonical /v1/plans/<uuid> URI
        m = fakes.FakePlan()
        ref_content = plan.yaml_content(m)
        self.assertEqual(ref_content['uri'], '%s/v1/plans/%s' %
                         (pecan.request.host_url, m.uuid))
    @mock.patch('solum.api.controllers.v1.plan.init_plan_v1')
    def test_init_plan_by_version(self, init_plan_v1):
        # a recognized version key dispatches to the v1 initializer
        yml_input_plan = {'version': 1, 'name': 'plan1', 'description': 'dsc'}
        plan.init_plan_by_version(yml_input_plan)
        # NOTE(review): Mock.assert_called_once() is a silent no-op on older
        # mock releases (the attribute is auto-created); consider
        # assert_called_once_with(...) -- verify the mock version in use.
        init_plan_v1.assert_called_once()
    @mock.patch('solum.api.controllers.v1.plan.init_plan_v1')
    def test_init_plan_by_version_missing(self, init_plan_v1):
        # a plan without a 'version' key must be rejected as BadRequest
        yml_input_plan = {'name': 'plan1', 'description': 'dsc'}
        self.assertRaises(exception.BadRequest, plan.init_plan_by_version,
                          yml_input_plan)
        init_plan_v1.assert_called_once()
    @mock.patch('solum.api.controllers.v1.plan.init_plan_v1')
    def test_init_plan_by_version_not_existing(self, init_plan_v1):
        # an unknown version number must be rejected as BadRequest
        yml_input_plan = {'version': 424242424242424242, 'name': 'plan1',
                          'description': 'dsc'}
        self.assertRaises(exception.BadRequest, plan.init_plan_by_version,
                          yml_input_plan)
        init_plan_v1.assert_called_once()
    @mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
    def test_init_plan_v1(self, mock_req):
        # the v1 initializer returns a (handler, plan model) pair
        yml_input_plan = {'version': 1, 'name': 'plan1', 'description': 'dsc'}
        hand_v1, plan_v1 = plan.init_plan_v1(yml_input_plan)
        self.assertIsInstance(hand_v1, plan_handler.PlanHandler)
        self.assertIsInstance(plan_v1, planmodel.Plan)
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.plan_handler.PlanHandler')
class TestPlanController(base.BaseTestCase):
    """HTTP-status tests for the single-plan controller (GET/PUT/DELETE).

    The class-level patches apply to every test method, which therefore
    receives (PlanHandler, resp_mock, request_mock) in reverse decorator
    order.
    """

    def setUp(self):
        super(TestPlanController, self).setUp()
        objects.load()
    def test_plan_get(self, PlanHandler, resp_mock, request_mock):
        # successful GET serializes the plan to YAML and returns 200
        hand_get = PlanHandler.return_value.get
        fake_plan = fakes.FakePlan()
        hand_get.return_value = fake_plan
        cont = plan.PlanController('test_id')
        resp = cont.get()
        self.assertIsNotNone(resp)
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; acceptable here since we parse our own response.
        resp_yml = yaml.load(resp)
        self.assertEqual(fake_plan.raw_content['name'], resp_yml['name'])
        hand_get.assert_called_with('test_id')
        self.assertEqual(200, resp_mock.status)
    def test_plan_get_not_found(self, PlanHandler, resp_mock, request_mock):
        # a missing plan maps to HTTP 404
        hand_get = PlanHandler.return_value.get
        hand_get.side_effect = exception.ResourceNotFound(name='plan',
                                                          id='test_id')
        plan.PlanController('test_id').get()
        hand_get.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)
    def test_plan_put_none(self, PlanHandler, resp_mock, request_mock):
        # an empty request body is a bad request
        request_mock.content_type = 'application/x-yaml'
        request_mock.body = ''
        hand_update = PlanHandler.return_value.update
        hand_update.return_value = fakes.FakePlan()
        plan.PlanController('test_id').put()
        self.assertEqual(400, resp_mock.status)
    def test_plan_put_invalid_yaml(self, PlanHandler, resp_mock, request_mock):
        # unparsable YAML is a bad request
        request_mock.content_type = 'application/x-yaml'
        request_mock.body = 'invalid yaml file'
        hand_update = PlanHandler.return_value.update
        hand_update.return_value = fakes.FakePlan()
        plan.PlanController('test_id').put()
        self.assertEqual(400, resp_mock.status)
    def test_plan_put_empty_yaml(self, PlanHandler, resp_mock, request_mock):
        # a YAML document with no keys is a bad request
        request_mock.content_type = 'application/x-yaml'
        request_mock.body = '{}'
        hand_update = PlanHandler.return_value.update
        hand_update.return_value = fakes.FakePlan()
        plan.PlanController('test_id').put()
        self.assertEqual(400, resp_mock.status)
    def test_plan_put_not_found(self, PlanHandler, resp_mock, request_mock):
        # updating a nonexistent plan maps to HTTP 404; note the 'version'
        # key is stripped before the handler is invoked
        data = 'version: 1\nname: ex_plan1\ndescription: dsc1.'
        request_mock.body = data
        request_mock.content_type = 'application/x-yaml'
        hand_update = PlanHandler.return_value.update
        hand_update.side_effect = exception.ResourceNotFound(
            name='plan', plan_id='test_id')
        plan.PlanController('test_id').put()
        hand_update.assert_called_with('test_id', {'name': 'ex_plan1',
                                                   'description': u'dsc1.'})
        self.assertEqual(404, resp_mock.status)
    def test_plan_put_ok(self, PlanHandler, resp_mock, request_mock):
        # a valid v1 update succeeds with HTTP 200
        data = 'version: 1\nname: ex_plan1\ndescription: dsc1.'
        request_mock.body = data
        request_mock.content_type = 'application/x-yaml'
        hand_update = PlanHandler.return_value.update
        hand_update.return_value = fakes.FakePlan()
        plan.PlanController('test_id').put()
        hand_update.assert_called_with('test_id', {'name': 'ex_plan1',
                                                   'description': u'dsc1.'})
        self.assertEqual(200, resp_mock.status)
    def test_plan_put_version_not_found(self, PlanHandler,
                                        resp_mock, request_mock):
        # an unsupported plan version is a bad request
        data = 'name: ex_plan1\ndescription: yaml plan1.\nversion: 2'
        request_mock.body = data
        request_mock.content_type = 'application/x-yaml'
        hand_update = PlanHandler.return_value.update
        hand_update.return_value = fakes.FakePlan()
        plan.PlanController('test_id').put()
        self.assertEqual(400, resp_mock.status)
    def test_plan_delete_not_found(self, PlanHandler, resp_mock, request_mock):
        # deleting a nonexistent plan maps to HTTP 404
        hand_delete = PlanHandler.return_value.delete
        hand_delete.side_effect = exception.ResourceNotFound(
            name='plan', plan_id='test_id')
        obj = plan.PlanController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)
    def test_plan_delete_ok(self, PlanHandler, resp_mock, request_mock):
        # a successful delete returns 204 No Content
        hand_delete = PlanHandler.return_value.delete
        hand_delete.return_value = None
        obj = plan.PlanController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(204, resp_mock.status)
    def test_plan_delete_dbreferror(self, PlanHandler, resp_mock,
                                    request_mock):
        # a DB reference (FK) error maps to HTTP 409 Conflict
        hand_delete = PlanHandler.return_value.delete
        hand_delete.side_effect = db_exc.DBReferenceError(
            mock.ANY, mock.ANY, mock.ANY, mock.ANY)
        obj = plan.PlanController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(409, resp_mock.status)
    def test_plan_delete_othererror(self, PlanHandler, resp_mock,
                                    request_mock):
        # any other DB error maps to HTTP 500
        hand_delete = PlanHandler.return_value.delete
        hand_delete.side_effect = db_exc.DBError()
        obj = plan.PlanController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(500, resp_mock.status)
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.plan_handler.PlanHandler')
class TestPlansController(base.BaseTestCase):
    """HTTP-status tests for the plans collection controller (GET/POST)."""

    def setUp(self):
        super(TestPlansController, self).setUp()
        objects.load()
    def test_plans_get_all(self, PlanHandler, resp_mock, request_mock):
        # GET on the collection serializes each plan to YAML with 200
        hand_get = PlanHandler.return_value.get_all
        fake_plan = fakes.FakePlan()
        hand_get.return_value = [fake_plan]
        resp = plan.PlansController().get_all()
        self.assertIsNotNone(resp)
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; acceptable here since we parse our own response.
        resp_yml = yaml.load(resp)
        self.assertEqual(fake_plan.raw_content['name'], resp_yml[0]['name'])
        self.assertEqual(200, resp_mock.status)
        hand_get.assert_called_with()
    def test_plans_post(self, PlanHandler, resp_mock, request_mock):
        # a valid v1 plan creates successfully with 201; the 'version'
        # key is stripped before the handler is invoked
        request_mock.body = 'version: 1\nname: ex_plan1\ndescription: dsc1.'
        request_mock.content_type = 'application/x-yaml'
        hand_create = PlanHandler.return_value.create
        hand_create.return_value = fakes.FakePlan()
        plan.PlansController().post()
        hand_create.assert_called_with({'name': 'ex_plan1',
                                        'description': 'dsc1.'})
        self.assertEqual(201, resp_mock.status)
    def test_plans_post_version_not_found(self, PlanHandler,
                                          resp_mock, request_mock):
        # an unsupported plan version is a bad request
        request_mock.body = 'version: 2\nname: ex_plan1\ndescription: dsc1.'
        request_mock.content_type = 'application/x-yaml'
        hand_create = PlanHandler.return_value.create
        hand_create.return_value = fakes.FakePlan()
        plan.PlansController().post()
        self.assertEqual(400, resp_mock.status)
    def test_plans_post_nodata(self, handler_mock, resp_mock, request_mock):
        # an empty request body is a bad request
        request_mock.body = ''
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        handler_create.return_value = fakes.FakePlan()
        plan.PlansController().post()
        self.assertEqual(400, resp_mock.status)
    def test_plans_post_invalid_yaml(self, handler_mock,
                                     resp_mock, request_mock):
        # unparsable YAML is a bad request
        request_mock.body = 'invalid yaml file'
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        handler_create.return_value = fakes.FakePlan()
        plan.PlansController().post()
        self.assertEqual(400, resp_mock.status)
    def test_plans_post_empty_yaml(self, handler_mock,
                                   resp_mock, request_mock):
        # a YAML document with no keys is a bad request
        request_mock.body = '{}'
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        handler_create.return_value = fakes.FakePlan()
        plan.PlansController().post()
        self.assertEqual(400, resp_mock.status)
class TestPlanAsDict(base.BaseTestCase):
    """Scenario-driven checks of Plan.as_dict() round-tripping.

    Each scenario supplies constructor kwargs (or None for a default
    instance); read-only fields ('uri', 'type') are not expected back
    from as_dict() and are dropped from the expectation.
    """

    scenarios = [
        ('none', dict(data=None)),
        ('one', dict(data={'name': 'foo'})),
        ('full', dict(data={'uri': 'http://example.com/v1/plans/x1',
                            'name': 'Example-plan',
                            'type': 'plan'}))
    ]

    def test_as_dict(self):
        objects.load()
        if self.data is None:
            model = planmodel.Plan()
            self.data = {}
        else:
            model = planmodel.Plan(**self.data)
            for readonly_field in ('uri', 'type'):
                self.data.pop(readonly_field, None)
        self.assertEqual(self.data, model.as_dict(objects.registry.Plan))
|
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nitroml.pipeline_filtering."""
import collections
from typing import Any, Mapping, Sequence
from absl.testing import absltest
from absl.testing import parameterized
from nitroml import pipeline_filtering
from tfx.dsl.compiler import constants as dsl_constants
from tfx.proto.orchestration import executable_spec_pb2
from tfx.proto.orchestration import pipeline_pb2 as p_pb2
from tfx.utils import test_case_utils
from google3.google.protobuf import any_pb2
from google3.learning.tfx.tflex.proto.deployment_config.pluggable_orchestrator import deployment_config_pb2
from ml_metadata.proto import metadata_store_pb2 as mlmd_pb2
# Short alias for the compiler's pipeline-run context type name constant.
_PIPELINE_RUN_CONTEXT_KEY = dsl_constants.PIPELINE_RUN_CONTEXT_TYPE_NAME
def to_context_spec(type_name: str, name: str) -> p_pb2.ContextSpec:
  """Builds a ContextSpec for the given context type and context name."""
  context_type = mlmd_pb2.ContextType(name=type_name)
  context_name = p_pb2.Value(
      field_value=mlmd_pb2.Value(string_value=name))
  return p_pb2.ContextSpec(type=context_type, name=context_name)
def to_output_spec(artifact_name: str) -> p_pb2.OutputSpec:
  """Builds an OutputSpec whose artifact type is `artifact_name`."""
  artifact_spec = p_pb2.OutputSpec.ArtifactSpec(
      type=mlmd_pb2.ArtifactType(name=artifact_name))
  return p_pb2.OutputSpec(artifact_spec=artifact_spec)
def to_input_channel(
    producer_output_key: str, producer_node_id: str, artifact_type: str,
    context_names: Mapping[str, str]) -> p_pb2.InputSpec.Channel:
  """Builds an InputSpec.Channel for one producer output.

  Args:
    producer_output_key: output key on the producer node.
    producer_node_id: id of the node that produces the artifact.
    artifact_type: type name of the consumed artifact.
    context_names: context type name -> context name, one query per entry.

  Returns:
    A fully-populated InputSpec.Channel proto.
  """
  context_queries = []
  for context_type, context_name in context_names.items():
    context_queries.append(
        p_pb2.InputSpec.Channel.ContextQuery(
            type=mlmd_pb2.ContextType(name=context_type),
            name=p_pb2.Value(
                field_value=mlmd_pb2.Value(string_value=context_name))))
  producer_query = p_pb2.InputSpec.Channel.ProducerNodeQuery(
      id=producer_node_id)
  artifact_query = p_pb2.InputSpec.Channel.ArtifactQuery(
      type=mlmd_pb2.ArtifactType(name=artifact_type))
  return p_pb2.InputSpec.Channel(
      output_key=producer_output_key,
      producer_node_query=producer_query,
      context_queries=context_queries,
      artifact_query=artifact_query)
def to_any_proto(input_proto):
  """Packs `input_proto` into a google.protobuf.Any message."""
  packed = any_pb2.Any()
  packed.Pack(input_proto)
  return packed
def make_dummy_executable_specs(node_ids: Sequence[str]) -> Mapping[str, Any]:
  """Maps each node id to an Any-packed dummy ExecutableSpec."""
  return {
      node_id: to_any_proto(
          deployment_config_pb2.ExecutableSpec(
              python_class_executable_spec=(
                  executable_spec_pb2.PythonClassExecutableSpec(
                      class_path='google3.path.to.Executable',
                      extra_flags=[node_id, 'extra', 'flags']))))
      for node_id in node_ids
  }
def make_dummy_custom_driver_specs(
    node_ids: Sequence[str]) -> Mapping[str, Any]:
  """Maps each node id to an Any-packed dummy custom-driver spec."""
  specs = {}
  for node_id in node_ids:
    driver_spec = deployment_config_pb2.ExecutableSpec(
        python_class_executable_spec=(
            executable_spec_pb2.PythonClassExecutableSpec(
                class_path='google3.path.to.CustomDriver',
                extra_flags=[node_id, 'extra', 'flags'])))
    specs[node_id] = to_any_proto(driver_spec)
  return specs
def make_dummy_node_level_platform_configs(
    node_ids: Sequence[str]) -> Mapping[str, Any]:
  """Maps each node id to an Any-packed dummy BorgPlatformConfig."""
  return {
      node_id: to_any_proto(
          deployment_config_pb2.BorgPlatformConfig(
              logs_read_access_roles=f'{node_id}.logreader'))
      for node_id in node_ids
  }
class PipelineFilteringTest(parameterized.TestCase, test_case_utils.TfxTest):
  def testSubpipeline_error(self):
    """Filtering a pipeline that contains a sub-pipeline raises ValueError."""
    node_a = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A'), id='a'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a')
            ])))
    # Wrap node_a inside a sub-pipeline node.
    sub_pipeline_node = p_pb2.Pipeline.PipelineOrNode(
        sub_pipeline=p_pb2.Pipeline(
            pipeline_info=p_pb2.PipelineInfo(id='my_subpipeline'),
            execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
            nodes=[node_a]))
    input_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[sub_pipeline_node])
    with self.assertRaises(ValueError):
      _ = pipeline_filtering.filter_pipeline(
          input_pipeline,
          pipeline_run_id_fn=lambda _: 'pipeline_run_000',
          from_nodes=lambda _: True,
          to_nodes=lambda _: True,
      )
def testNoFilter(self):
"""Basic case where there are no filters applied.
input_pipeline: node_a -> node_b -> node_c
from_node: all nodes
to_node: all nodes
expected output_pipeline: node_a -> node_b -> node_c
"""
node_a = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='A'), id='a'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'a')
]),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
downstream_nodes=['b']))
node_b = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='B'), id='b'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'b')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='a',
producer_output_key='out',
artifact_type='AB',
context_names={
'pipeline': 'my_pipeline',
'component': 'a'
})
],
min_count=1)
}),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
upstream_nodes=['a'],
downstream_nodes=['c']))
node_c = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='C'), id='c'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'c')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='b',
producer_output_key='out',
artifact_type='BC',
context_names={
'pipeline': 'my_pipeline',
'component': 'b'
})
],
min_count=1)
}),
upstream_nodes=['b']))
input_pipeline = p_pb2.Pipeline(
pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
nodes=[node_a, node_b, node_c])
filtered_pipeline = pipeline_filtering.filter_pipeline(
input_pipeline,
pipeline_run_id_fn=lambda _: 'pipeline_run_000',
from_nodes=lambda _: True,
to_nodes=lambda _: True,
)
self.assertProtoEquals(input_pipeline, filtered_pipeline)
def testFilterOutNothing(self):
"""Basic case where no nodes are filtered out.
input_pipeline: node_a -> node_b -> node_c
from_node: node_a
to_node: node_c
expected output_pipeline: node_a -> node_b -> node_c
"""
node_a = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='A'), id='a'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'a')
]),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
downstream_nodes=['b']))
node_b = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='B'), id='b'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'b')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='a',
producer_output_key='out',
artifact_type='AB',
context_names={
'pipeline': 'my_pipeline',
'component': 'a'
})
],
min_count=1)
}),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
upstream_nodes=['a'],
downstream_nodes=['c']))
node_c = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='C'), id='c'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'c')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='b',
producer_output_key='out',
artifact_type='BC',
context_names={
'pipeline': 'my_pipeline',
'component': 'b'
})
],
min_count=1)
}),
upstream_nodes=['b']))
input_pipeline = p_pb2.Pipeline(
pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
nodes=[node_a, node_b, node_c])
filtered_pipeline = pipeline_filtering.filter_pipeline(
input_pipeline,
pipeline_run_id_fn=lambda _: 'pipeline_run_000',
from_nodes=lambda node_id: (node_id == 'a'),
to_nodes=lambda node_id: (node_id == 'c'),
)
self.assertProtoEquals(input_pipeline, filtered_pipeline)
def testFilterOutSinkNode(self):
"""Filter out a node that has upstream nodes but no downstream nodes.
input_pipeline: node_a -> node_b -> node_c
to_node: node_b
expected_output_pipeline: node_a -> node_b
"""
node_a = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='A'), id='a'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'a')
]),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
downstream_nodes=['b']))
node_b = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='B'), id='b'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'b')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='a',
producer_output_key='out',
artifact_type='AB',
context_names={
'pipeline': 'my_pipeline',
'component': 'a'
})
],
min_count=1)
}),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
upstream_nodes=['a'],
downstream_nodes=['c']))
node_c = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='C'), id='c'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'c')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='b',
producer_output_key='out',
artifact_type='BC',
context_names={
'pipeline': 'my_pipeline',
'component': 'b'
})
],
min_count=1)
}),
upstream_nodes=['b']))
input_pipeline = p_pb2.Pipeline(
pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
nodes=[node_a, node_b, node_c])
filtered_pipeline = pipeline_filtering.filter_pipeline(
input_pipeline,
pipeline_run_id_fn=lambda _: 'pipeline_run_000',
from_nodes=lambda node_id: (node_id == 'a'),
to_nodes=lambda node_id: (node_id == 'b'),
)
node_b_no_downstream = p_pb2.Pipeline.PipelineOrNode()
node_b_no_downstream.CopyFrom(node_b)
del node_b_no_downstream.pipeline_node.downstream_nodes[:]
expected_output_pipeline = p_pb2.Pipeline(
pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
nodes=[node_a, node_b_no_downstream])
self.assertProtoEquals(expected_output_pipeline, filtered_pipeline)
  def testFilterOutSourceNode(self):
    """Filter out a node that has no upstream nodes but has downstream nodes.

    input_pipeline: node_a -> node_b -> node_c
    from_node: node_b
    to_node: node_c
    old_pipeline_run_id: pipeline_run_000
    expected_output_pipeline: node_b -> node_c
    """
    node_a = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A'), id='a'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a')
            ]),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
            downstream_nodes=['b']))
    node_b = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='B'), id='b'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'b')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='a',
                                    producer_output_key='out',
                                    artifact_type='AB',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'a'
                                    })
                            ],
                            min_count=1)
                }),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
            upstream_nodes=['a'],
            downstream_nodes=['c']))
    node_c = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='C'), id='c'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'c')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='b',
                                    producer_output_key='out',
                                    artifact_type='BC',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'b'
                                    })
                            ],
                            min_count=1)
                }),
            upstream_nodes=['b']))
    input_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a, node_b, node_c])
    filtered_pipeline = pipeline_filtering.filter_pipeline(
        input_pipeline,
        pipeline_run_id_fn=lambda _: 'pipeline_run_000',
        from_nodes=lambda node_id: (node_id == 'b'),
        to_nodes=lambda node_id: (node_id == 'c'),
    )
    # node_b becomes a source node of the filtered pipeline: its upstream
    # edge to the excluded node_a is dropped, and its input channel gains an
    # extra context query pinning resolution to the old pipeline run.
    node_b_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_b_fixed.CopyFrom(node_b)
    del node_b_fixed.pipeline_node.upstream_nodes[:]
    node_b_fixed.pipeline_node.inputs.inputs['in'].channels[
        0].context_queries.append(
            p_pb2.InputSpec.Channel.ContextQuery(
                type=mlmd_pb2.ContextType(name='pipeline_run'),
                name=p_pb2.Value(
                    field_value=mlmd_pb2.Value(
                        string_value='pipeline_run_000'))))
    expected_output_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_b_fixed, node_c])
    self.assertProtoEquals(expected_output_pipeline, filtered_pipeline)
def testFilterOutSourceNode_triangle(self):
"""Filter out a source node in a triangle.
input_pipeline:
node_a -> node_b -> node_c
|--------------^
from_node: node_b
to_node: node_c
old_pipeline_run_id: pipeline_run_000
expected_output_pipeline: node_b -> node_c
"""
node_a = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='A'), id='a'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'a')
]),
outputs=p_pb2.NodeOutputs(outputs={
'out_b': to_output_spec('AB'),
'out_c': to_output_spec('AC')
}),
downstream_nodes=['b', 'c']))
node_b = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='B'), id='b'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'b')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='a',
producer_output_key='out',
artifact_type='AB',
context_names={
'pipeline': 'my_pipeline',
'component': 'a'
})
],
min_count=1)
}),
outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
upstream_nodes=['a'],
downstream_nodes=['c']))
node_c = p_pb2.Pipeline.PipelineOrNode(
pipeline_node=p_pb2.PipelineNode(
node_info=p_pb2.NodeInfo(
type=mlmd_pb2.ExecutionType(name='C'), id='c'),
contexts=p_pb2.NodeContexts(contexts=[
to_context_spec('pipeline', 'my_pipeline'),
to_context_spec('component', 'c')
]),
inputs=p_pb2.NodeInputs(
inputs={
'in_a':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='a',
producer_output_key='out_c',
artifact_type='AC',
context_names={
'pipeline': 'my_pipeline',
'component': 'a'
})
],
min_count=1),
'in_b':
p_pb2.InputSpec(
channels=[
to_input_channel(
producer_node_id='b',
producer_output_key='out',
artifact_type='BC',
context_names={
'pipeline': 'my_pipeline',
'component': 'b'
})
],
min_count=1)
}),
upstream_nodes=['a', 'b']))
input_pipeline = p_pb2.Pipeline(
pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
nodes=[node_a, node_b, node_c])
filtered_pipeline = pipeline_filtering.filter_pipeline(
input_pipeline,
pipeline_run_id_fn=lambda _: 'pipeline_run_000',
from_nodes=lambda node_id: (node_id == 'b'),
to_nodes=lambda node_id: (node_id == 'c'),
)
node_b_fixed = p_pb2.Pipeline.PipelineOrNode()
node_b_fixed.CopyFrom(node_b)
del node_b_fixed.pipeline_node.upstream_nodes[:]
node_b_fixed.pipeline_node.inputs.inputs['in'].channels[
0].context_queries.append(
p_pb2.InputSpec.Channel.ContextQuery(
type=mlmd_pb2.ContextType(name='pipeline_run'),
name=p_pb2.Value(
field_value=mlmd_pb2.Value(
string_value='pipeline_run_000'))))
node_c_fixed = p_pb2.Pipeline.PipelineOrNode()
node_c_fixed.CopyFrom(node_c)
node_c_fixed.pipeline_node.upstream_nodes[:] = 'b'
node_c_fixed.pipeline_node.inputs.inputs['in_a'].channels[
0].context_queries.append(
p_pb2.InputSpec.Channel.ContextQuery(
type=mlmd_pb2.ContextType(name='pipeline_run'),
name=p_pb2.Value(
field_value=mlmd_pb2.Value(
string_value='pipeline_run_000'))))
expected_output_pipeline = p_pb2.Pipeline(
pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
nodes=[node_b_fixed, node_c_fixed])
self.assertProtoEquals(expected_output_pipeline, filtered_pipeline)
  def testSkipNodes(self):
    """Skip a node in the middle.

    input_pipeline: node_a -> node_b -> node_c
    from_node: all nodes
    to_node: all nodes
    skip_node: node_b
    old_pipeline_run_id: pipeline_run_000
    expected_output_pipeline: node_a (unconnected) node_c
    """
    node_a = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A'), id='a'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a')
            ]),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
            downstream_nodes=['b']))
    node_b = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='B'), id='b'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'b')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='a',
                                    producer_output_key='out',
                                    artifact_type='AB',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'a'
                                    })
                            ],
                            min_count=1)
                }),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
            upstream_nodes=['a'],
            downstream_nodes=['c']))
    node_c = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='C'), id='c'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'c')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='b',
                                    producer_output_key='out',
                                    artifact_type='BC',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'b'
                                    })
                            ],
                            min_count=1)
                }),
            upstream_nodes=['b']))
    input_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a, node_b, node_c])
    filtered_pipeline = pipeline_filtering.filter_pipeline(
        input_pipeline,
        pipeline_run_id_fn=lambda _: 'pipeline_run_000',
        skip_nodes=lambda node_id: (node_id == 'b'),
    )
    # With node_b skipped, node_a loses its downstream edge and node_c loses
    # its upstream edge; node_c's input channel is pinned to the old run so
    # it resolves node_b's previously-produced artifacts.
    node_a_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_a_fixed.CopyFrom(node_a)
    del node_a_fixed.pipeline_node.downstream_nodes[:]
    node_c_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_c_fixed.CopyFrom(node_c)
    del node_c_fixed.pipeline_node.upstream_nodes[:]
    node_c_fixed.pipeline_node.inputs.inputs['in'].channels[
        0].context_queries.append(
            p_pb2.InputSpec.Channel.ContextQuery(
                type=mlmd_pb2.ContextType(name='pipeline_run'),
                name=p_pb2.Value(
                    field_value=mlmd_pb2.Value(
                        string_value='pipeline_run_000'))))
    expected_output_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a_fixed, node_c_fixed])
    self.assertProtoEquals(expected_output_pipeline, filtered_pipeline)
  def testSkipNodes_preexisting_pipeline_run(self):
    """Skip a node in the middle.

    Also contains input_channels with pipeline_run context_query. This simulates
    filtering pipeline_run_001, and setting old_pipeline_run_id to
    pipeline_run_000.

    input_pipeline: node_a -> node_b -> node_c
    from_node: all nodes
    to_node: all nodes
    skip_node: node_b
    old_pipeline_run_id: pipeline_run_000
    expected_output_pipeline: node_a (unconnected) node_c
    """
    node_a = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A'), id='a'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a')
            ]),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
            downstream_nodes=['b']))
    node_b = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='B'), id='b'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'b')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='a',
                                    producer_output_key='out',
                                    artifact_type='AB',
                                    context_names={
                                        'pipeline':
                                            'my_pipeline',
                                        _PIPELINE_RUN_CONTEXT_KEY:
                                            'pipeline_run_001',
                                        'component':
                                            'a'
                                    })
                            ],
                            min_count=1)
                }),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
            upstream_nodes=['a'],
            downstream_nodes=['c']))
    node_c = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='C'), id='c'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'c')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='b',
                                    producer_output_key='out',
                                    artifact_type='BC',
                                    context_names={
                                        'pipeline':
                                            'my_pipeline',
                                        _PIPELINE_RUN_CONTEXT_KEY:
                                            'pipeline_run_001',
                                        'component':
                                            'b'
                                    })
                            ],
                            min_count=1)
                }),
            upstream_nodes=['b']))
    input_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a, node_b, node_c])
    filtered_pipeline = pipeline_filtering.filter_pipeline(
        input_pipeline,
        pipeline_run_id_fn=lambda _: 'pipeline_run_000',
        skip_nodes=lambda node_id: (node_id == 'b'),
    )
    node_a_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_a_fixed.CopyFrom(node_a)
    del node_a_fixed.pipeline_node.downstream_nodes[:]
    # The pre-existing pipeline_run context is REPLACED (not appended to):
    # the channel is rebuilt with pipeline_run_000 instead of pipeline_run_001.
    node_c_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_c_fixed.CopyFrom(node_c)
    del node_c_fixed.pipeline_node.upstream_nodes[:]
    del node_c_fixed.pipeline_node.inputs.inputs['in'].channels[:]
    node_c_fixed.pipeline_node.inputs.inputs['in'].channels.append(
        to_input_channel(
            producer_node_id='b',
            producer_output_key='out',
            artifact_type='BC',
            context_names={
                'pipeline': 'my_pipeline',
                _PIPELINE_RUN_CONTEXT_KEY: 'pipeline_run_000',
                'component': 'b'
            }))
    expected_output_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a_fixed, node_c_fixed])
    self.assertProtoEquals(expected_output_pipeline, filtered_pipeline)
  def testMultiplePipelineRunIds(self):
    """Resolve with two different pipeline_run_ids in two input channels.

    input_pipeline:
      node_a1 -> node_b1
      node_a2 -> node_b2
    from_node: node_a2, node_b2
    to_node: all nodes
    pipeline_run_id_fn:
      a1>a2 |-> pipeline_run_000
      b1>b2 |-> pipeline_run_001
    expected_output_pipeline:
      (pipeline_run_000)>node_a2
      (pipeline_run_001)>node_b2
    """
    node_a1 = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A1'), id='a1'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a1')
            ]),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB1')}),
            downstream_nodes=['b1']))
    node_b1 = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='B1'), id='b1'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'b1')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='a1',
                                    producer_output_key='out',
                                    artifact_type='AB1',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'a1'
                                    })
                            ],
                            min_count=1)
                }),
            upstream_nodes=['a1']))
    node_a2 = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A2'), id='a2'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a2')
            ]),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB2')}),
            downstream_nodes=['b2']))
    node_b2 = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='B2'), id='b2'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'b2')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='a2',
                                    producer_output_key='out',
                                    artifact_type='AB2',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'a2'
                                    })
                            ],
                            min_count=1)
                }),
            upstream_nodes=['a2']))
    input_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a1, node_b1, node_a2, node_b2])

    # Per-channel run id: channels fed by a1 resolve against run_000,
    # everything else against run_001.
    def _pipeline_run_id_fn(channel: p_pb2.InputSpec.Channel) -> str:
      if channel.producer_node_query.id == 'a1':
        return 'pipeline_run_000'
      return 'pipeline_run_001'

    filtered_pipeline = pipeline_filtering.filter_pipeline(
        input_pipeline,
        pipeline_run_id_fn=_pipeline_run_id_fn,
        from_nodes=lambda node_id: (node_id[0] == 'b'),
    )
    node_b1_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_b1_fixed.CopyFrom(node_b1)
    del node_b1_fixed.pipeline_node.upstream_nodes[:]
    del node_b1_fixed.pipeline_node.inputs.inputs['in'].channels[:]
    node_b1_fixed.pipeline_node.inputs.inputs['in'].channels.append(
        to_input_channel(
            producer_node_id='a1',
            producer_output_key='out',
            artifact_type='AB1',
            context_names=collections.OrderedDict([
                ('pipeline', 'my_pipeline'),
                ('component', 'a1'),
                (_PIPELINE_RUN_CONTEXT_KEY, 'pipeline_run_000'),
            ])))
    node_b2_fixed = p_pb2.Pipeline.PipelineOrNode()
    node_b2_fixed.CopyFrom(node_b2)
    del node_b2_fixed.pipeline_node.upstream_nodes[:]
    del node_b2_fixed.pipeline_node.inputs.inputs['in'].channels[:]
    node_b2_fixed.pipeline_node.inputs.inputs['in'].channels.append(
        to_input_channel(
            producer_node_id='a2',
            producer_output_key='out',
            artifact_type='AB2',
            context_names=collections.OrderedDict([
                ('pipeline', 'my_pipeline'),
                ('component', 'a2'),
                (_PIPELINE_RUN_CONTEXT_KEY, 'pipeline_run_001'),
            ])))
    expected_output_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_b1_fixed, node_b2_fixed])
    self.assertProtoEquals(expected_output_pipeline, filtered_pipeline)
  # Each case pairs an IntermediateDeploymentConfig packed into an Any with
  # the config expected after filtering the pipeline down to nodes a and b.
  @parameterized.named_parameters(
      {
          'testcase_name': 'none',
          'input_deployment_cfg': None,
          'expected_deployment_cfg': any_pb2.Any()
      }, {
          'testcase_name':
              'all',
          'input_deployment_cfg':
              to_any_proto(
                  p_pb2.IntermediateDeploymentConfig(
                      executor_specs=make_dummy_executable_specs('abc'),
                      custom_driver_specs=make_dummy_custom_driver_specs('abc'),
                      node_level_platform_configs=(
                          make_dummy_node_level_platform_configs('abc')),
                      metadata_connection_config=to_any_proto(
                          mlmd_pb2.ConnectionConfig(
                              fake_database=mlmd_pb2.FakeDatabaseConfig())))),
          'expected_deployment_cfg':
              to_any_proto(
                  p_pb2.IntermediateDeploymentConfig(
                      executor_specs=make_dummy_executable_specs('ab'),
                      custom_driver_specs=make_dummy_custom_driver_specs('ab'),
                      node_level_platform_configs=(
                          make_dummy_node_level_platform_configs('ab')),
                      metadata_connection_config=to_any_proto(
                          mlmd_pb2.ConnectionConfig(
                              fake_database=mlmd_pb2.FakeDatabaseConfig()))))
      }, {
          'testcase_name':
              'missing_fields',
          'input_deployment_cfg':
              to_any_proto(
                  p_pb2.IntermediateDeploymentConfig(
                      executor_specs=make_dummy_executable_specs('abc'),
                      metadata_connection_config=to_any_proto(
                          mlmd_pb2.ConnectionConfig(
                              fake_database=mlmd_pb2.FakeDatabaseConfig())))),
          'expected_deployment_cfg':
              to_any_proto(
                  p_pb2.IntermediateDeploymentConfig(
                      executor_specs=make_dummy_executable_specs('ab'),
                      metadata_connection_config=to_any_proto(
                          mlmd_pb2.ConnectionConfig(
                              fake_database=mlmd_pb2.FakeDatabaseConfig()))))
      }, {
          'testcase_name':
              'different_fields',
          'input_deployment_cfg':
              to_any_proto(
                  p_pb2.IntermediateDeploymentConfig(
                      executor_specs=make_dummy_executable_specs('c'),
                      custom_driver_specs=make_dummy_custom_driver_specs('bc'),
                      node_level_platform_configs=(
                          make_dummy_node_level_platform_configs('ab')),
                      metadata_connection_config=to_any_proto(
                          mlmd_pb2.ConnectionConfig(
                              fake_database=mlmd_pb2.FakeDatabaseConfig())))),
          'expected_deployment_cfg':
              to_any_proto(
                  p_pb2.IntermediateDeploymentConfig(
                      custom_driver_specs=make_dummy_custom_driver_specs('b'),
                      node_level_platform_configs=(
                          make_dummy_node_level_platform_configs('ab')),
                      metadata_connection_config=to_any_proto(
                          mlmd_pb2.ConnectionConfig(
                              fake_database=mlmd_pb2.FakeDatabaseConfig()))))
      })
  def testDeploymentConfig(self, input_deployment_cfg, expected_deployment_cfg):
    """Test that per-node deployment configs are filtered correctly."""
    node_a = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='A'), id='a'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'a')
            ]),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('AB')}),
            downstream_nodes=['b']))
    node_b = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='B'), id='b'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'b')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='a',
                                    producer_output_key='out',
                                    artifact_type='AB',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'a'
                                    })
                            ],
                            min_count=1)
                }),
            outputs=p_pb2.NodeOutputs(outputs={'out': to_output_spec('BC')}),
            upstream_nodes=['a'],
            downstream_nodes=['c']))
    node_c = p_pb2.Pipeline.PipelineOrNode(
        pipeline_node=p_pb2.PipelineNode(
            node_info=p_pb2.NodeInfo(
                type=mlmd_pb2.ExecutionType(name='C'), id='c'),
            contexts=p_pb2.NodeContexts(contexts=[
                to_context_spec('pipeline', 'my_pipeline'),
                to_context_spec('component', 'c')
            ]),
            inputs=p_pb2.NodeInputs(
                inputs={
                    'in':
                        p_pb2.InputSpec(
                            channels=[
                                to_input_channel(
                                    producer_node_id='b',
                                    producer_output_key='out',
                                    artifact_type='BC',
                                    context_names={
                                        'pipeline': 'my_pipeline',
                                        'component': 'b'
                                    })
                            ],
                            min_count=1)
                }),
            upstream_nodes=['b']))
    input_pipeline = p_pb2.Pipeline(
        pipeline_info=p_pb2.PipelineInfo(id='my_pipeline'),
        execution_mode=p_pb2.Pipeline.ExecutionMode.SYNC,
        nodes=[node_a, node_b, node_c],
        deployment_config=input_deployment_cfg)
    filtered_pipeline = pipeline_filtering.filter_pipeline(
        input_pipeline,
        pipeline_run_id_fn=lambda _: 'pipeline_run_000',
        from_nodes=lambda node_id: (node_id == 'a'),
        to_nodes=lambda node_id: (node_id == 'b'),
    )
    # Only the deployment config is checked here; node filtering itself is
    # covered by the other tests in this class.
    self.assertProtoEquals(expected_deployment_cfg,
                           filtered_pipeline.deployment_config)
# Standard absl test entry point: discover and run all tests in this module.
if __name__ == '__main__':
  absltest.main()
|
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import inspect
import os
import sys
import traceback
from time import sleep
scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
maindir = os.path.abspath(os.path.join(scriptdir, '../../'))
sys.path.append(maindir)
transitionsdir = os.path.abspath(os.path.join(scriptdir, '../../transitions'))
sys.path.append(transitionsdir)
from oscrypto import *
from encryptstates import *
from Common import *
from CommandExecutor import *
from DiskUtil import *
from transitions import *
class RHEL68EncryptionStateMachine(OSEncryptionStateMachine):
    """State machine driving in-place OS-disk encryption on RHEL 6.8.

    States progress linearly:
    prereq -> selinux -> stripdown -> unmount_oldroot ->
    encrypt_block_device -> patch_boot_system -> completed,
    with a retry self-loop on unmount_oldroot. Per-state work is delegated
    to the handler objects in self.state_objs (see encryptstates).
    """
    # Machine states; on_enter='on_enter_state' invokes the inherited hook
    # on entry. 'uninitialized' and 'completed' are bare markers.
    states = [
        State(name='uninitialized'),
        State(name='prereq', on_enter='on_enter_state'),
        State(name='selinux', on_enter='on_enter_state'),
        State(name='stripdown', on_enter='on_enter_state'),
        State(name='unmount_oldroot', on_enter='on_enter_state'),
        State(name='encrypt_block_device', on_enter='on_enter_state'),
        State(name='patch_boot_system', on_enter='on_enter_state'),
        State(name='completed'),
    ]
    # Transition table for the transitions-library Machine. Each forward
    # transition is gated on 'should_exit_previous_state'; the
    # retry_unmount_oldroot self-loop is deliberately ungated so retries
    # are always allowed.
    transitions = [
        {
            'trigger': 'skip_encryption',
            'source': 'uninitialized',
            'dest': 'completed'
        },
        {
            'trigger': 'enter_prereq',
            'source': 'uninitialized',
            'dest': 'prereq'
        },
        {
            'trigger': 'enter_selinux',
            'source': 'prereq',
            'dest': 'selinux',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'enter_stripdown',
            'source': 'selinux',
            'dest': 'stripdown',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'enter_unmount_oldroot',
            'source': 'stripdown',
            'dest': 'unmount_oldroot',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'retry_unmount_oldroot',
            'source': 'unmount_oldroot',
            'dest': 'unmount_oldroot',
            'before': 'on_enter_state'
        },
        {
            'trigger': 'enter_encrypt_block_device',
            'source': 'unmount_oldroot',
            'dest': 'encrypt_block_device',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'enter_patch_boot_system',
            'source': 'encrypt_block_device',
            'dest': 'patch_boot_system',
            'before': 'on_enter_state',
            'conditions': 'should_exit_previous_state'
        },
        {
            'trigger': 'stop_machine',
            'source': 'patch_boot_system',
            'dest': 'completed',
            'conditions': 'should_exit_previous_state'
        },
    ]
    def on_enter_state(self):
        # Delegate to the base-class hook; kept explicit so the transitions
        # table can reference it by name on this class.
        super(RHEL68EncryptionStateMachine, self).on_enter_state()
    def should_exit_previous_state(self):
        # when this is called, self.state is still the "source" state in the transition
        return super(RHEL68EncryptionStateMachine, self).should_exit_previous_state()
    def __init__(self, hutil, distro_patcher, logger, encryption_environment):
        """Wire up per-state handler objects and the transitions Machine."""
        super(RHEL68EncryptionStateMachine, self).__init__(hutil, distro_patcher, logger, encryption_environment)
        self.state_objs = {
            'prereq': PrereqState(self.context),
            'selinux': SelinuxState(self.context),
            'stripdown': StripdownState(self.context),
            'unmount_oldroot': UnmountOldrootState(self.context),
            'encrypt_block_device': EncryptBlockDeviceState(self.context),
            'patch_boot_system': PatchBootSystemState(self.context),
        }
        self.state_machine = Machine(model=self,
                                     states=RHEL68EncryptionStateMachine.states,
                                     transitions=RHEL68EncryptionStateMachine.transitions,
                                     initial='uninitialized')
    def start_encryption(self):
        """Run the machine end to end, then reboot into the encrypted OS.

        If /dev/mapper/osencrypt already appears in the mount table, the OS
        volume is treated as already encrypted and everything is skipped.
        """
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(command_to_execute="mount",
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)
        if '/dev/mapper/osencrypt' in proc_comm.stdout:
            self.logger.log("OS volume is already encrypted")
            self.skip_encryption()
            self.log_machine_state()
            return
        self.log_machine_state()
        self.enter_prereq()
        self.log_machine_state()
        self.enter_selinux()
        self.log_machine_state()
        self.enter_stripdown()
        self.log_machine_state()
        # /oldroot may still be busy; retry the unmount, sleeping 10s between
        # failed attempts and reporting each failure via the status channel.
        # After 10 attempts the next iteration raises and the wrapped failure
        # message is re-raised out of the except block.
        oldroot_unmounted_successfully = False
        attempt = 1
        while not oldroot_unmounted_successfully:
            self.logger.log("Attempt #{0} to unmount /oldroot".format(attempt))
            try:
                if attempt == 1:
                    self.enter_unmount_oldroot()
                elif attempt > 10:
                    raise Exception("Could not unmount /oldroot in 10 attempts")
                else:
                    self.retry_unmount_oldroot()
                self.log_machine_state()
            except Exception as e:
                message = "Attempt #{0} to unmount /oldroot failed with error: {1}, stack trace: {2}".format(attempt,
                                                                                                             e,
                                                                                                             traceback.format_exc())
                self.logger.log(msg=message)
                self.hutil.do_status_report(operation='EnableEncryptionOSVolume',
                                            status=CommonVariables.extension_error_status,
                                            status_code=str(CommonVariables.unmount_oldroot_error),
                                            message=message)
                sleep(10)
                if attempt > 10:
                    raise Exception(message)
            else:
                oldroot_unmounted_successfully = True
            finally:
                attempt += 1
        self.enter_encrypt_block_device()
        self.log_machine_state()
        self.enter_patch_boot_system()
        self.log_machine_state()
        self.stop_machine()
        self.log_machine_state()
        self._reboot()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import sys
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import safe_utils
# Module-level logger used when msg_fmt interpolation fails below.
LOG = logging.getLogger(__name__)
# When fatal_exception_format_errors is True, a msg_fmt interpolation error
# in NovaException.__init__ is re-raised instead of being swallowed with a
# fallback to the unformatted message (useful in test runs).
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI HTTP exception carrying an arbitrary code/title/explanation."""
    def __init__(self, code=0, title="", explanation=""):
        # Assign before calling the webob base __init__, which reads these
        # attributes while building the HTTP response.
        self.code = code
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return dict((k, v) for k, v in original.iteritems() if not "_pass" in k)
def wrap_exception(notifier=None, get_notifier=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.

    :param notifier: a notifier object to send the error event with, or None.
    :param get_notifier: zero-arg callable returning a notifier, evaluated
        lazily at failure time; used only when ``notifier`` is falsy.
    """
    def inner(f):
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception as e:
                # save_and_reraise_exception re-raises the original error
                # after this block, so notification failures don't mask it.
                with excutils.save_and_reraise_exception():
                    if notifier or get_notifier:
                        payload = dict(exception=e)
                        call_dict = safe_utils.getcallargs(f, context,
                                                           *args, **kw)
                        # Strip *_pass keys so passwords never get notified.
                        cleansed = _cleanse_dict(call_dict)
                        payload.update({'args': cleansed})
                        # If f has multiple decorators, they must use
                        # functools.wraps to ensure the name is
                        # propagated.
                        event_type = f.__name__
                        (notifier or get_notifier()).error(context,
                                                           event_type,
                                                           payload)
        return functools.wraps(f)(wrapped)
    return inner
class NovaException(Exception):
    """Base Nova Exception.

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Default message template; subclasses override.
    msg_fmt = _("An unknown exception occurred.")
    # Default HTTP-style status code; subclasses override.
    code = 500
    headers = {}
    safe = False
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.msg_fmt % kwargs
            except Exception:
                exc_info = sys.exc_info()
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                if CONF.fatal_exception_format_errors:
                    # NOTE: Python 2-only three-argument raise, re-raising
                    # with the original traceback.
                    raise exc_info[0], exc_info[1], exc_info[2]
                else:
                    # at least get the core message out if something happened
                    message = self.msg_fmt
        super(NovaException, self).__init__(message)
    def format_message(self):
        # NOTE(mrodden): use the first argument to the python Exception object
        # which should be our full NovaException message, (see __init__)
        return self.args[0]
class EncryptionFailure(NovaException):
    """Raised when text encryption fails."""
    msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
    """Raised when text decryption fails."""
    msg_fmt = _("Failed to decrypt text: %(reason)s")
class VirtualInterfaceCreateException(NovaException):
    """Raised when creating a virtual network interface fails."""
    msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
    """Raised when no virtual interface with a unique MAC could be created."""
    # Bug fix: the adjacent string literals previously concatenated without
    # a separating space, producing "...virtual interfacewith unique...".
    msg_fmt = _("5 attempts to create virtual interface "
                "with unique mac address failed")
class GlanceConnectionFailed(NovaException):
    """Raised when the Glance image service cannot be reached."""
    msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
                "%(reason)s")
class NotAuthorized(NovaException):
    """Base class for authorization failures (HTTP 403)."""
    ec2_code = 'AuthFailure'
    msg_fmt = _("Not authorized.")
    code = 403
class AdminRequired(NotAuthorized):
    """Raised when an operation requires admin privileges."""
    msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
    """Raised when policy forbids the requested action."""
    msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
    """Raised when an image is not in the 'active' state."""
    # NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
    # but it still seems like the most appropriate option.
    ec2_code = 'IncorrectState'
    msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
    """Raised when the user is not authorized to access an image."""
    msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
    """Base class for invalid-input errors (HTTP 400)."""
    msg_fmt = _("Unacceptable parameters.")
    code = 400
class InvalidBDM(Invalid):
    """Base class for invalid block device mapping errors."""
    msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
    """Raised when a BDM references an unavailable snapshot."""
    msg_fmt = _("Block Device Mapping is Invalid: "
                "failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
    """Raised when a BDM references an unavailable volume."""
    msg_fmt = _("Block Device Mapping is Invalid: "
                "failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
    """Raised when a BDM references an unavailable image."""
    msg_fmt = _("Block Device Mapping is Invalid: "
                "failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
    """Raised when the BDM boot sequence conflicts with the image."""
    msg_fmt = _("Block Device Mapping is Invalid: "
                "Boot sequence for the instance "
                "and image/block device mapping "
                "combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
    """Raised when more local devices are requested than allowed."""
    msg_fmt = _("Block Device Mapping is Invalid: "
                "You specified more local devices than the "
                "limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
    """Raised when ephemeral disks exceed the instance type limit."""
    msg_fmt = _("Ephemeral disks requested are larger than "
                "the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
    """Raised when the requested swap drive exceeds the instance type limit."""
    msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
    """Raised when a BDM is malformed; details carry the specifics."""
    msg_fmt = _("Block Device Mapping is Invalid: "
                "%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts.")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid") + ": %(reason)s"
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received") + ": %(reason)s"
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume") + ": %(reason)s"
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata") + ": %(reason)s"
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size") + ": %(reason)s"
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
msg_fmt = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
msg_fmt = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Cannot perform action '%(action)s' on aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s.")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s.")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance") + ": %(reason)s"
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance") + ": %(reason)s"
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances") + ": %(reason)s"
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources.")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
msg_fmt = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolumeID.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshotID.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
msg_fmt = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
msg_fmt = _("Either Network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
    """Raised when a request matches more than one network and the caller
    must disambiguate by passing explicit network ID(s).
    """
    # Fixed the user-facing message: it previously ended with a stray ","
    # instead of a period.
    msg_fmt = _("More than one possible network found. Specify "
                "network ID(s) to select which one(s) to connect to.")
class DatastoreNotFound(NotFound):
    msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")


class PortInUse(Invalid):
    msg_fmt = _("Port %(port_id)s is still in use.")


class PortNotUsable(Invalid):
    msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")


class PortNotFree(Invalid):
    msg_fmt = _("No free port available for instance %(instance)s.")


# ---------------------------------------------------------------------------
# Fixed IP errors.
# ---------------------------------------------------------------------------


class FixedIpExists(NovaException):
    msg_fmt = _("Fixed ip %(address)s already exists.")


class FixedIpNotFound(NotFound):
    msg_fmt = _("No fixed IP associated with id %(id)s.")


class FixedIpNotFoundForAddress(FixedIpNotFound):
    msg_fmt = _("Fixed ip not found for address %(address)s.")


class FixedIpNotFoundForInstance(FixedIpNotFound):
    msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.")


class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
    msg_fmt = _("Network host %(host)s has zero fixed ips "
                "in network %(network_id)s.")


class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
    msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")


class FixedIpNotFoundForNetwork(FixedIpNotFound):
    msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
                "network (%(network_uuid)s).")


class FixedIpAlreadyInUse(NovaException):
    msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
                "%(instance_uuid)s.")


class FixedIpAssociatedWithMultipleInstances(NovaException):
    msg_fmt = _("More than one instance is associated with fixed ip address "
                "'%(address)s'.")


class FixedIpInvalid(Invalid):
    msg_fmt = _("Fixed IP address %(address)s is invalid.")


class NoMoreFixedIps(NovaException):
    ec2_code = 'UnsupportedOperation'
    msg_fmt = _("Zero fixed ips available.")


class NoFixedIpsDefined(NotFound):
    msg_fmt = _("Zero fixed ips could be found.")


# ---------------------------------------------------------------------------
# Floating IP errors.
# ---------------------------------------------------------------------------


class FloatingIpExists(NovaException):
    msg_fmt = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
    """Raised when a floating IP cannot be looked up by its id."""
    # Fixed misspelled EC2 error code "UnsupportedOpperation"; the correct
    # spelling is already used elsewhere in this file (InvalidVolume,
    # NoMoreFixedIps).
    ec2_code = "UnsupportedOperation"
    msg_fmt = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
    msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")


class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    msg_fmt = _("Floating ip not found for address %(address)s.")


class FloatingIpNotFoundForHost(FloatingIpNotFound):
    msg_fmt = _("Floating ip not found for host %(host)s.")


class FloatingIpMultipleFoundForAddress(NovaException):
    msg_fmt = _("Multiple floating ips are found for address %(address)s.")


class FloatingIpPoolNotFound(NotFound):
    msg_fmt = _("Floating ip pool not found.")
    # safe=True: the message contains no sensitive data and may be shown
    # to API users.
    safe = True


class NoMoreFloatingIps(FloatingIpNotFound):
    msg_fmt = _("Zero floating ips available.")
    safe = True
class FloatingIpAssociated(NovaException):
    """Raised when an operation requires an unassociated floating IP."""
    # Fixed misspelled EC2 error code "UnsupportedOpperation" (correct
    # spelling is used elsewhere in this file).
    ec2_code = "UnsupportedOperation"
    msg_fmt = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
    msg_fmt = _("Floating ip %(address)s is not associated.")


class NoFloatingIpsDefined(NotFound):
    msg_fmt = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
    """Raised when the network interface backing a floating IP is missing."""
    # Fixed misspelled EC2 error code "UnsupportedOpperation" (correct
    # spelling is used elsewhere in this file).
    ec2_code = "UnsupportedOperation"
    msg_fmt = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
    """Raised on attempts to disassociate an auto-assigned floating IP."""
    # Fixed misspelled EC2 error code "UnsupportedOpperation" (correct
    # spelling is used elsewhere in this file).
    ec2_code = "UnsupportedOperation"
    msg_fmt = _("Cannot disassociate auto assigned floating ip")
class KeypairNotFound(NotFound):
    ec2_code = 'InvalidKeyPair.NotFound'
    msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")


# ---------------------------------------------------------------------------
# Service / host errors.
# ---------------------------------------------------------------------------


class ServiceNotFound(NotFound):
    msg_fmt = _("Service %(service_id)s could not be found.")


class ServiceBinaryExists(NovaException):
    msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")


class ServiceTopicExists(NovaException):
    msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")


class HostNotFound(NotFound):
    msg_fmt = _("Host %(host)s could not be found.")


class ComputeHostNotFound(HostNotFound):
    msg_fmt = _("Compute host %(host)s could not be found.")


class HostBinaryNotFound(NotFound):
    msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")


# ---------------------------------------------------------------------------
# Quota errors.
# ---------------------------------------------------------------------------


class InvalidReservationExpiration(Invalid):
    msg_fmt = _("Invalid reservation expiration %(expire)s.")


class InvalidQuotaValue(Invalid):
    msg_fmt = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class QuotaNotFound(NotFound):
    msg_fmt = _("Quota could not be found")


class QuotaExists(NovaException):
    msg_fmt = _("Quota exists for project %(project_id)s, "
                "resource %(resource)s")


class QuotaResourceUnknown(QuotaNotFound):
    msg_fmt = _("Unknown quota resources %(unknown)s.")


class ProjectUserQuotaNotFound(QuotaNotFound):
    msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
                "could not be found.")


class ProjectQuotaNotFound(QuotaNotFound):
    msg_fmt = _("Quota for project %(project_id)s could not be found.")


class QuotaClassNotFound(QuotaNotFound):
    msg_fmt = _("Quota class %(class_name)s could not be found.")


class QuotaUsageNotFound(QuotaNotFound):
    msg_fmt = _("Quota usage for project %(project_id)s could not be found.")


class ReservationNotFound(QuotaNotFound):
    msg_fmt = _("Quota reservation %(uuid)s could not be found.")


class OverQuota(NovaException):
    msg_fmt = _("Quota exceeded for resources: %(overs)s")


# ---------------------------------------------------------------------------
# Security group errors.
# ---------------------------------------------------------------------------


class SecurityGroupNotFound(NotFound):
    msg_fmt = _("Security group %(security_group_id)s not found.")


class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    msg_fmt = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")


class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    msg_fmt = _("Security group with rule %(rule_id)s not found.")


class SecurityGroupExists(Invalid):
    ec2_code = 'InvalidGroup.Duplicate'
    msg_fmt = _("Security group %(security_group_name)s already exists "
                "for project %(project_id)s.")


class SecurityGroupExistsForInstance(Invalid):
    msg_fmt = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")


class SecurityGroupNotExistsForInstance(Invalid):
    msg_fmt = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
    """Raised when a security group default rule id does not exist."""
    # Fixed malformed %-mapping: "(%rule_id)s" never interpolated the rule id
    # (and "%r" followed by the kwargs dict raises a formatting error).
    msg_fmt = _("Security group default rule (%(rule_id)s) not found.")
class SecurityGroupCannotBeApplied(Invalid):
    msg_fmt = _("Network requires port_security_enabled and subnet associated"
                " in order to apply security groups.")


class SecurityGroupRuleExists(Invalid):
    ec2_code = 'InvalidPermission.Duplicate'
    msg_fmt = _("Rule already exists in group: %(rule)s")


class NoUniqueMatch(NovaException):
    msg_fmt = _("No Unique Match Found.")
    code = 409


# ---------------------------------------------------------------------------
# Migration / console errors.
# ---------------------------------------------------------------------------


class MigrationNotFound(NotFound):
    msg_fmt = _("Migration %(migration_id)s could not be found.")


class MigrationNotFoundByStatus(MigrationNotFound):
    msg_fmt = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")


class ConsolePoolNotFound(NotFound):
    msg_fmt = _("Console pool %(pool_id)s could not be found.")


class ConsolePoolExists(NovaException):
    msg_fmt = _("Console pool with host %(host)s, console_type "
                "%(console_type)s and compute_host %(compute_host)s "
                "already exists.")


class ConsolePoolNotFoundForHostType(NotFound):
    msg_fmt = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")


class ConsoleNotFound(NotFound):
    msg_fmt = _("Console %(console_id)s could not be found.")


class ConsoleNotFoundForInstance(ConsoleNotFound):
    msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")


class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    msg_fmt = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")


class ConsoleTypeInvalid(Invalid):
    msg_fmt = _("Invalid console type %(console_type)s")


class ConsoleTypeUnavailable(Invalid):
    msg_fmt = _("Unavailable console type %(console_type)s.")


# ---------------------------------------------------------------------------
# Instance type / flavor / cell errors.
# ---------------------------------------------------------------------------


class InstanceTypeNotFound(NotFound):
    msg_fmt = _("Instance type %(instance_type_id)s could not be found.")


class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    msg_fmt = _("Instance type with name %(instance_type_name)s "
                "could not be found.")


class FlavorNotFound(NotFound):
    msg_fmt = _("Flavor %(flavor_id)s could not be found.")


class FlavorAccessNotFound(NotFound):
    msg_fmt = _("Flavor access not found for %(flavor_id)s / "
                "%(project_id)s combination.")


class CellNotFound(NotFound):
    msg_fmt = _("Cell %(cell_name)s doesn't exist.")


class CellExists(NovaException):
    msg_fmt = _("Cell with name %(name)s already exists.")


class CellRoutingInconsistency(NovaException):
    msg_fmt = _("Inconsistency in cell routing: %(reason)s")


class CellServiceAPIMethodNotFound(NotFound):
    msg_fmt = _("Service API method not found: %(detail)s")


class CellTimeout(NotFound):
    msg_fmt = _("Timeout waiting for response from cell")


class CellMaxHopCountReached(NovaException):
    msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")


class NoCellsAvailable(NovaException):
    msg_fmt = _("No cells available matching scheduling criteria.")


class CellsUpdateUnsupported(NovaException):
    msg_fmt = _("Cannot update cells configuration file.")


class InstanceUnknownCell(NotFound):
    msg_fmt = _("Cell is not known for instance %(instance_uuid)s")


class SchedulerHostFilterNotFound(NotFound):
    msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")


class InstanceTypeExtraSpecsNotFound(NotFound):
    msg_fmt = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")


class FileNotFound(NotFound):
    msg_fmt = _("File %(file_path)s could not be found.")


class NoFilesFound(NotFound):
    msg_fmt = _("Zero files could be found.")


class SwitchNotFoundForNetworkAdapter(NotFound):
    msg_fmt = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")


class NetworkAdapterNotFound(NotFound):
    msg_fmt = _("Network adapter %(adapter)s could not be found.")


class ClassNotFound(NotFound):
    msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")


class NotAllowed(NovaException):
    msg_fmt = _("Action not allowed.")


class ImageRotationNotAllowed(NovaException):
    msg_fmt = _("Rotation is not allowed for snapshots")


class RotationRequiredForBackup(NovaException):
    msg_fmt = _("Rotation param is required for backup image_type")


class KeyPairExists(NovaException):
    ec2_code = 'InvalidKeyPair.Duplicate'
    msg_fmt = _("Key pair '%(key_name)s' already exists.")


class InstanceExists(NovaException):
    msg_fmt = _("Instance %(name)s already exists.")


class InstanceTypeExists(NovaException):
    msg_fmt = _("Instance Type with name %(name)s already exists.")


class InstanceTypeIdExists(NovaException):
    msg_fmt = _("Instance Type with ID %(flavor_id)s already exists.")


class FlavorAccessExists(NovaException):
    msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
                "and project %(project_id)s combination.")


class InvalidSharedStorage(NovaException):
    msg_fmt = _("%(path)s is not on shared storage: %(reason)s")


class InvalidLocalStorage(NovaException):
    msg_fmt = _("%(path)s is not on local storage: %(reason)s")


class MigrationError(NovaException):
    msg_fmt = _("Migration error") + ": %(reason)s"


class MigrationPreCheckError(MigrationError):
    msg_fmt = _("Migration pre-check error") + ": %(reason)s"


class MalformedRequestBody(NovaException):
    msg_fmt = _("Malformed message body: %(reason)s")


# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
    msg_fmt = _("Could not find config at %(path)s")


class PasteAppNotFound(NovaException):
    msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")


# ---------------------------------------------------------------------------
# Resize errors.
# ---------------------------------------------------------------------------


class CannotResizeToSameFlavor(NovaException):
    msg_fmt = _("When resizing, instances must change flavor!")


class ResizeError(NovaException):
    msg_fmt = _("Resize error: %(reason)s")


class CannotResizeDisk(NovaException):
    msg_fmt = _("Server disk was unable to be resized because: %(reason)s")


class InstanceTypeMemoryTooSmall(NovaException):
    msg_fmt = _("Instance type's memory is too small for requested image.")


class InstanceTypeDiskTooSmall(NovaException):
    msg_fmt = _("Instance type's disk is too small for requested image.")


class InsufficientFreeMemory(NovaException):
    msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")


class NoValidHost(NovaException):
    msg_fmt = _("No valid host was found. %(reason)s")


# ---------------------------------------------------------------------------
# Quota-exceeded errors (HTTP 413).
# ---------------------------------------------------------------------------


class QuotaError(NovaException):
    """Base class for quota-exceeded errors; HTTP 413 with Retry-After."""
    ec2_code = 'ResourceLimitExceeded'
    msg_fmt = _("Quota exceeded") + ": code=%(code)s"
    code = 413
    headers = {'Retry-After': 0}
    safe = True


class TooManyInstances(QuotaError):
    msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
                " but already used %(used)d of %(allowed)d %(resource)s")


class FloatingIpLimitExceeded(QuotaError):
    msg_fmt = _("Maximum number of floating ips exceeded")


class FixedIpLimitExceeded(QuotaError):
    msg_fmt = _("Maximum number of fixed ips exceeded")


class MetadataLimitExceeded(QuotaError):
    msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")


class OnsetFileLimitExceeded(QuotaError):
    msg_fmt = _("Personality file limit exceeded")


class OnsetFilePathLimitExceeded(QuotaError):
    msg_fmt = _("Personality file path too long")


class OnsetFileContentLimitExceeded(QuotaError):
    msg_fmt = _("Personality file content too long")


class KeypairLimitExceeded(QuotaError):
    msg_fmt = _("Maximum number of key pairs exceeded")


class SecurityGroupLimitExceeded(QuotaError):
    ec2_code = 'SecurityGroupLimitExceeded'
    msg_fmt = _("Maximum number of security groups or rules exceeded")


class PortLimitExceeded(QuotaError):
    msg_fmt = _("Maximum number of ports exceeded")


# ---------------------------------------------------------------------------
# Aggregate errors.
# ---------------------------------------------------------------------------


class AggregateError(NovaException):
    msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                "caused an error: %(reason)s.")


class AggregateNotFound(NotFound):
    msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")


class AggregateNameExists(NovaException):
    msg_fmt = _("Aggregate %(aggregate_name)s already exists.")


class AggregateHostNotFound(NotFound):
    msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")


class AggregateMetadataNotFound(NotFound):
    msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
                "key %(metadata_key)s.")


class AggregateHostExists(NovaException):
    msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")


class InstanceTypeCreateFailed(NovaException):
    msg_fmt = _("Unable to create instance type")


class InstancePasswordSetFailed(NovaException):
    msg_fmt = _("Failed to set admin password on %(instance)s "
                "because %(reason)s")
    safe = True


class DuplicateVlan(NovaException):
    msg_fmt = _("Detected existing vlan with id %(vlan)d")


class CidrConflict(NovaException):
    msg_fmt = _("There was a conflict when trying to complete your request.")
    code = 409


class InstanceNotFound(NotFound):
    ec2_code = 'InvalidInstanceID.NotFound'
    msg_fmt = _("Instance %(instance_id)s could not be found.")


class InstanceInfoCacheNotFound(NotFound):
    msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
                "found.")


class NodeNotFound(NotFound):
    msg_fmt = _("Node %(node_id)s could not be found.")


class NodeNotFoundByUUID(NotFound):
    msg_fmt = _("Node with UUID %(node_uuid)s could not be found.")


class MarkerNotFound(NotFound):
    msg_fmt = _("Marker %(marker)s could not be found.")


class InvalidInstanceIDMalformed(Invalid):
    ec2_code = 'InvalidInstanceID.Malformed'
    msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").")


class CouldNotFetchImage(NovaException):
    msg_fmt = _("Could not fetch image %(image_id)s")


class CouldNotUploadImage(NovaException):
    msg_fmt = _("Could not upload image %(image_id)s")


class TaskAlreadyRunning(NovaException):
    msg_fmt = _("Task %(task_name)s is already running on host %(host)s")


class TaskNotRunning(NovaException):
    msg_fmt = _("Task %(task_name)s is not running on host %(host)s")


class InstanceIsLocked(InstanceInvalidState):
    msg_fmt = _("Instance %(instance_uuid)s is locked")


class ConfigDriveInvalidValue(Invalid):
    msg_fmt = _("Invalid value for Config Drive option: %(option)s")


class ConfigDriveMountFailed(NovaException):
    msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
                "Error: %(error)s")


class ConfigDriveUnknownFormat(NovaException):
    msg_fmt = _("Unknown config drive format %(format)s. Select one of "
                "iso9660 or vfat.")


class InterfaceAttachFailed(Invalid):
    msg_fmt = _("Failed to attach network adapter device to %(instance)s")


class InterfaceDetachFailed(Invalid):
    msg_fmt = _("Failed to detach network adapter device from %(instance)s")


class InstanceUserDataTooLarge(NovaException):
    msg_fmt = _("User data too large. User data must be no larger than "
                "%(maxsize)s bytes once base64 encoded. Your data is "
                "%(length)d bytes")


class InstanceUserDataMalformed(NovaException):
    msg_fmt = _("User data needs to be valid base 64.")


class UnexpectedTaskStateError(NovaException):
    msg_fmt = _("unexpected task state: expecting %(expected)s but "
                "the actual state is %(actual)s")


class InstanceActionNotFound(NovaException):
    msg_fmt = _("Action for request_id %(request_id)s on instance"
                " %(instance_uuid)s not found")


class InstanceActionEventNotFound(NovaException):
    msg_fmt = _("Event %(event)s not found for action id %(action_id)s")


class UnexpectedVMStateError(NovaException):
    msg_fmt = _("unexpected VM state: expecting %(expected)s but "
                "the actual state is %(actual)s")


class CryptoCAFileNotFound(FileNotFound):
    msg_fmt = _("The CA file for %(project)s could not be found")


class CryptoCRLFileNotFound(FileNotFound):
    msg_fmt = _("The CRL file for %(project)s could not be found")


class InstanceRecreateNotSupported(Invalid):
    msg_fmt = _('Instance recreate is not implemented by this virt driver.')


class ServiceGroupUnavailable(NovaException):
    msg_fmt = _("The service from servicegroup driver %(driver)s is "
                "temporarily unavailable.")


class DBNotAllowed(NovaException):
    msg_fmt = _('%(binary)s attempted direct database access which is '
                'not allowed by policy')


class UnsupportedVirtType(Invalid):
    msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
                "this compute driver")


class UnsupportedHardware(Invalid):
    msg_fmt = _("Requested hardware '%(model)s' is not supported by "
                "the '%(virt)s' virt driver")


class Base64Exception(NovaException):
    msg_fmt = _("Invalid Base 64 data for file %(path)s")


class BuildAbortException(NovaException):
    msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")


class RescheduledException(NovaException):
    msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
                "%(reason)s")


class ShadowTableExists(NovaException):
    msg_fmt = _("Shadow table with name %(name)s already exists.")


class InstanceFaultRollback(NovaException):
    """Wraps the inner exception that caused an instance rollback."""

    def __init__(self, inner_exception=None):
        message = _("Instance rollback performed due to: %s")
        # Keep the original exception so callers can re-raise or inspect it.
        self.inner_exception = inner_exception
        super(InstanceFaultRollback, self).__init__(message % inner_exception)


# ---------------------------------------------------------------------------
# Versioned-object errors.
# ---------------------------------------------------------------------------


class UnsupportedObjectError(NovaException):
    msg_fmt = _('Unsupported object type %(objtype)s')


class OrphanedObjectError(NovaException):
    msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')


class IncompatibleObjectVersion(NovaException):
    msg_fmt = _('Version %(objver)s of %(objname)s is not supported')


class ObjectActionError(NovaException):
    msg_fmt = _('Object action %(action)s failed because: %(reason)s')


class CoreAPIMissing(NovaException):
    msg_fmt = _("Core API extensions are missing: %(missing_apis)s")


class AgentError(NovaException):
    msg_fmt = _('Error during following call to agent: %(method)s')


class AgentTimeout(AgentError):
    msg_fmt = _('Unable to contact guest agent. '
                'The following call timed out: %(method)s')


class AgentNotImplemented(AgentError):
    msg_fmt = _('Agent does not support the call: %(method)s')


# ---------------------------------------------------------------------------
# Instance group errors.
# ---------------------------------------------------------------------------


class InstanceGroupNotFound(NotFound):
    msg_fmt = _("Instance group %(group_uuid)s could not be found.")


class InstanceGroupIdExists(NovaException):
    msg_fmt = _("Instance group %(group_uuid)s already exists.")


class InstanceGroupMetadataNotFound(NotFound):
    msg_fmt = _("Instance group %(group_uuid)s has no metadata with "
                "key %(metadata_key)s.")


class InstanceGroupMemberNotFound(NotFound):
    msg_fmt = _("Instance group %(group_uuid)s has no member with "
                "id %(instance_id)s.")


class InstanceGroupPolicyNotFound(NotFound):
    msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")


class PluginRetriesExceeded(NovaException):
    msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")


# ---------------------------------------------------------------------------
# Image download module errors.
# ---------------------------------------------------------------------------


class ImageDownloadModuleError(NovaException):
    msg_fmt = _("There was an error with the download module %(module)s. "
                "%(reason)s")


class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
    msg_fmt = _("The metadata for this location will not work with this "
                "module %(module)s. %(reason)s.")


class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
    msg_fmt = _("The method %(method_name)s is not implemented.")


class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
    msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")


# ---------------------------------------------------------------------------
# PCI device errors.
# ---------------------------------------------------------------------------


class PciDeviceWrongAddressFormat(NovaException):
    msg_fmt = _("The PCI address %(address)s has an incorrect format.")


class PciDeviceNotFoundById(NotFound):
    msg_fmt = _("PCI device %(id)s not found")


class PciDeviceNotFound(NovaException):
    msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")


class PciDeviceInvalidStatus(NovaException):
    msg_fmt = _(
        "PCI Device %(compute_node_id)s:%(address)s is %(status)s "
        "instead of %(hopestatus)s")


class PciDeviceInvalidOwner(NovaException):
    msg_fmt = _(
        "PCI Device %(compute_node_id)s:%(address)s is owned by %(owner)s "
        "instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
    """Raised when a PCI device request cannot be satisfied."""
    # Fixed malformed %-mapping: "(%requests)s" never interpolated the
    # requests (and "%r" followed by the kwargs dict raises a formatting
    # error).
    msg_fmt = _(
        "PCI Device request (%(requests)s) failed")
class PciDevicePoolEmpty(NovaException):
    msg_fmt = _(
        "Attempt to consume PCI Device %(compute_node_id)s:%(address)s "
        "from empty pool")


class PciInvalidAlias(NovaException):
    msg_fmt = _("Invalid PCI alias definition: %(reason)s")


class PciRequestAliasNotDefined(NovaException):
    msg_fmt = _("PCI alias %(alias)s is not defined")


class MissingParameter(NovaException):
    ec2_code = 'MissingParameter'
    msg_fmt = _("Not enough parameters: %(reason)s")
    code = 400


class PciConfigInvalidWhitelist(Invalid):
    msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")


class PciTrackerInvalidNodeId(NovaException):
    msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s")


# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
    ec2_code = 'InternalError'
    # Deliberately not wrapped in _(): the caller supplies the full message.
    msg_fmt = "%(err)s"


class PciDevicePrepareFailed(NovaException):
    msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
                "%(instance_uuid)s: %(reason)s")


class PciDeviceDetachFailed(NovaException):
    msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")


class PciDeviceUnsupportedHypervisor(NovaException):
    msg_fmt = _("%(type)s hypervisor does not support PCI devices")


class KeyManagerError(NovaException):
    msg_fmt = _("key manager error: %(reason)s")


# ---------------------------------------------------------------------------
# Project QoS group errors.
# ---------------------------------------------------------------------------


class TenantQosGroupNotFound(NovaException):
    msg_fmt = _("Project QoS Group %(uuid)s not found.")
    code = 404


class ProjectNotInTenantQosGroup(NovaException):
    msg_fmt = _("Project %(project_id)s is not in "
                "project QoS Group %(uuid)s.")


class TenantQosGroupIdExists(NovaException):
    msg_fmt = _("Project QoS Group %(uuid)s already exists.")


class TenantQosGroupDefaultCannotDelete(NovaException):
    msg_fmt = _("Default Project QoS Group can not be deleted.")


class TenantQosGroupMappingExists(Invalid):
    msg_fmt = _("Tenant QoS Group %(tenant_qos_group_uuid)s already exists "
                "for project %(project_id)s.")
|
|
'''
Copyright (c) 2009-2018, K. Kumar (me@kartikkumar.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages.
# Plotting
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import cm
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
import matplotlib.animation as animation
# I/O
import json
import jstyleson
from pprint import pprint
import sqlite3
# Numerical
import math
import numpy as np
import pandas as pd
# System
import sys
import time
# Display the application banner and start the wall-clock timer.
# The separator rows are 66 characters wide; build them once instead of
# hard-coding runs of dashes/asterisks and padded spaces.
_RULE = "-" * 66
print("")
print(_RULE)
print("dustsim".center(66))
print("Copyright (c) 2009-2018, K. Kumar (me@kartikkumar.com)".center(66))
print(_RULE)
print("")
# Start timer.
start_time = time.time()
# Section header for the echoed input parameters.
_SECTION_RULE = "*" * 66
print("")
print(_SECTION_RULE)
print("Input parameters".center(66))
print(_SECTION_RULE)
print("")
# Parse JSON configuration file.
# Raise exception if wrong number of inputs are provided to script.
if len(sys.argv) != 2:
    raise Exception("Only provide a JSON config file as input!")
# Read the (comment-tolerant) JSON configuration file.
# NOTE: the original code also called open() outside the context manager,
# leaking a file handle, and discarded the result of jstyleson.dumps();
# both have been removed.
with open(sys.argv[1], 'r') as json_input_file:
    json_input_string = json_input_file.read()
config = jstyleson.loads(json_input_string)
pprint(config)
# Section header for the data-fetch / plotting operations.
_OPS_RULE = "*" * 66
print("")
print(_OPS_RULE)
print("Operations".center(66))
print(_OPS_RULE)
print("")
print("Fetching data from database ...")
# Connect to SQLite database.
try:
    database = sqlite3.connect(config['database'])
except sqlite3.Error as error:
    print("Error %s:" % error.args[0])
    sys.exit(1)
# Table names come from the (trusted) config file and cannot be bound as
# SQL parameters, but the simulation_id value IS bound via `params` now,
# instead of being concatenated into the SQL string.
metadata = pd.read_sql(
    "SELECT * FROM " + config['metadata_table'], database)
initial_state = pd.read_sql(
    "SELECT * FROM " + config['initial_states_table']
    + " WHERE simulation_id = ?",
    database, params=(config['simulation_id'],))
simulation_results = pd.read_sql(
    "SELECT * FROM " + config['simulation_results_table']
    + " WHERE simulation_id = ?",
    database, params=(config['simulation_id'],))
# The DataFrames are fully materialized; the connection is no longer
# needed beyond this point.
database.close()
print("Data successfully fetched!")
print("")
print("Generating figures ...")
# Pre-compute useful variables.
output_path_prefix = config["output_directory"] + '/'
# Convert the elapsed simulation time from seconds to Julian years.
simulation_time_in_years = simulation_results['time'] / (365.25*24.0*3600.0)
# Generate a 2x3 figure with the time histories of the six Keplerian
# elements.
# NOTE(review): this figure is saved under the "eccentricity_change_figure"
# config key, while the original comment called it a "semi-major axis
# change figure" -- confirm the intended output filename key.
fig = plt.figure()
ax1 = fig.add_subplot(2, 3, 1)
ax2 = fig.add_subplot(2, 3, 2)
ax3 = fig.add_subplot(2, 3, 3)
ax4 = fig.add_subplot(2, 3, 4)
ax5 = fig.add_subplot(2, 3, 5)
ax6 = fig.add_subplot(2, 3, 6)
# Plot semi-major axis time history.
# The abscissa is in years (see conversion above), so the labels read
# "[years]" instead of the original, incorrect "[s]".
ax1.set_xlabel(r'$t$ [years]')
ax1.set_ylabel(r'$a$ [km]')
ax1.grid()
ax1.plot(simulation_time_in_years, simulation_results['semi_major_axis'], color='k')
# Plot eccentricity time history.
ax2.set_xlabel(r'$t$ [years]')
ax2.set_ylabel(r'$e$ [-]')
ax2.grid()
ax2.plot(simulation_time_in_years, simulation_results['eccentricity'], color='k')
# Plot inclination time history (stored in radians, plotted in degrees).
ax3.set_xlabel(r'$t$ [years]')
ax3.set_ylabel(r'$i$ [deg]')
ax3.grid()
ax3.plot(simulation_time_in_years, simulation_results['inclination'].apply(math.degrees), color='k')
# Plot argument of periapsis time history.
ax4.set_xlabel(r'$t$ [years]')
ax4.set_ylabel(r'$\omega$ [deg]')
ax4.grid()
ax4.plot(simulation_time_in_years, simulation_results['argument_of_periapsis'].apply(math.degrees), color='k')
# Plot longitude of ascending node time history.
ax5.set_xlabel(r'$t$ [years]')
ax5.set_ylabel(r'$\Omega$ [deg]')
ax5.grid()
ax5.plot(simulation_time_in_years, simulation_results['longitude_of_ascending_node'].apply(math.degrees), color='k')
# Plot true anomaly time history.
ax6.set_xlabel(r'$t$ [years]')
ax6.set_ylabel(r'$\theta$ [deg]')
ax6.grid()
ax6.plot(simulation_time_in_years, simulation_results['true_anomaly'].apply(math.degrees), color='k')
# Save figure.
plt.tight_layout()
plt.savefig(output_path_prefix + config["eccentricity_change_figure"], dpi=config["figure_dpi"])
# number_of_simulations = len(initial_states)
# times = simulation_results[simulation_results['simulation_id'] == 1 ]['time']
# zero_change = pd.DataFrame(np.zeros((number_of_simulations)))
# # Generate histograms for initial_states in Keplerian elements.
# fig = plt.figure()
# ax1 = fig.add_subplot(2, 3, 1)
# ax2 = fig.add_subplot(2, 3, 2)
# ax3 = fig.add_subplot(2, 3, 3)
# ax4 = fig.add_subplot(2, 3, 4)
# ax5 = fig.add_subplot(2, 3, 5)
# ax6 = fig.add_subplot(2, 3, 6)
# # Plot semi-major axis histogram.
# ax1.hist(initial_states['semi_major_axis']-config['semi_major_axis_reference'],color='k')
# ax1.set_xlabel('a [km]')
# ax1.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
# ax1.grid()
# # Plot eccentricity histogram.
# ax2.hist(initial_states['eccentricity'],color='k')
# ax2.set_xlabel('e [-]')
# ax2.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
# ax2.grid()
# # Plot inclination histogram.
# ax3.hist(initial_states['inclination'].apply(math.degrees),color='k')
# ax3.set_xlabel('i [deg]')
# ax3.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
# ax3.grid()
# # Plot argument of periapsis histogram.
# ax4.hist(initial_states['argument_of_periapsis'].apply(math.degrees),color='k')
# ax4.set_xlabel(r'$\omega$ [deg]')
# ax4.grid()
# # Plot longitude of ascending node histogram.
# ax5.hist(initial_states['longitude_of_ascending_node'].apply(math.degrees),color='k')
# ax5.set_xlabel(r'$\Omega$ [deg]')
# ax5.grid()
# # Plot true anomaly histogram.
# ax6.hist(initial_states['true_anomaly'].apply(math.degrees),color='k')
# ax6.set_xlabel(r'$\theta$ [deg]')
# ax6.grid()
# # Save figure.
# plt.tight_layout()
# plt.savefig(output_path_prefix + config["initial_states_figure"], dpi=config["figure_dpi"])
# # Generate semi-major axis change figure.
# fig = plt.figure()
# plt.xlabel(r'$a_{0}$ [km]')
# plt.ylabel(r'$\Delta a$ [km]')
# plt.grid()
# plt.plot(initial_states['semi_major_axis'],zero_change,marker='.',color='k',linestyle='None')
# simulation_id_mask = simulation_results['simulation_id'] == 1
# simulation_times = simulation_results['time'][simulation_id_mask]
# semi_major_axis_change = simulation_results['semi_major_axis'][simulation_id_mask] - simulation_results['semi_major_axis'][simulation_id_mask][0]
# print(initial_states['semi_major_axis'][initial_states['simulation_id'] == 1][0])
# # plt.plot(initial_states['semi_major_axis'],zero_change,marker='.',color='k',linestyle='None')
# # for x in range(1,len(times)):
# # # a_time = simulation_results[simulation_results['time'] == times[x-1]]['time']
# # semi_major_axis = simulation_results[simulation_results['time'] == times[x-1]]['semi_major_axis']
# # print(simulation_results['time'])
# # # semi_major_axis_change = pd.DataFrame(semi_major_axis.values-initial_states['semi_major_axis'].values)
# # # plt.plot(initial_states['semi_major_axis'],semi_major_axis_change,marker='.',color='k',linestyle='None')
# # Save figure.
# plt.tight_layout()
# plt.savefig(output_path_prefix + config["semi_major_axis_change_figure"], dpi=config["figure_dpi"])
# # # Generate eccentricity change figure.
# # fig = plt.figure()
# # plt.xlabel(r'$e_{0}$ [-]')
# # plt.ylabel(r'$\Delta e$ [-]')
# # plt.grid()
# # plt.plot(initial_states['eccentricity'],zero_change,marker='.',color='k',linestyle='None')
# # for x in range(1,len(times)):
# # # a_time = simulation_results[simulation_results['time'] == times[x-1]]['time']
# # eccentricity = simulation_results[simulation_results['time'] == times[x-1]]['eccentricity']
# # eccentricity_change = pd.DataFrame(eccentricity.values-initial_states['eccentricity'].values)
# # plt.plot(initial_states['eccentricity'],eccentricity_change,marker='.',color='k',linestyle='None')
# # # Save figure.
# # plt.tight_layout()
# # plt.savefig(output_path_prefix + config["eccentricity_change_figure"], dpi=config["figure_dpi"])
# Report completion of the figure-generation stage.
print ("Figures generated successfully!")
print ("")
# # print ("Generating animation ...")
# # # Generate animation of change in Keplerian elements.
# # fig = plt.figure()
# # # plt.tight_layout()
# # ax1 = fig.add_subplot(2, 3, 1)
# # ax2 = fig.add_subplot(2, 3, 2)
# # ax3 = fig.add_subplot(2, 3, 3)
# # ax4 = fig.add_subplot(2, 3, 4)
# # ax5 = fig.add_subplot(2, 3, 5)
# # ax6 = fig.add_subplot(2, 3, 6)
# # # Generate animation of semi-major axis change.
# # ax1.set_xlim(metadata['semi_major_axis_minimum'][0], metadata['semi_major_axis_maximum'][0])
# # ax1.set_ylim(-5.0,5.0)
# # ax1.set_xlabel(r'$a_{0}$ [km]')
# # ax1.set_ylabel(r'$\Delta a$ [km]')
# # line1, = ax1.plot([],[],marker='o',color='k',linestyle='None')
# # # ax2.set_xlim(0.0, 1.0e-2)
# # # ax2.set_ylim(-1, 1)
# # ax2.set_xlabel(r'$e_{0}$ [-]')
# # ax2.set_ylabel(r'$\Delta e$ [-]')
# # line2, = ax2.plot([],[],marker='o',color='k',linestyle='None')
# # # Set up animation functions.
# # def init():
# # ax1.plot(initial_states['semi_major_axis'],zero_change,marker='o',color='k',linestyle='None')
# # ax2.plot(initial_states['eccentricity'],zero_change,marker='o',color='k',linestyle='None')
# # def animate(i):
# # a_time = simulation_results[simulation_results['time'] == times[i]]['time']
# # semi_major_axis = simulation_results[simulation_results['time'] == times[i]]['semi_major_axis']
# # semi_major_axis_change = pd.DataFrame(semi_major_axis.values-initial_states['semi_major_axis'].values)
# # line1.set_data(initial_states['semi_major_axis'],semi_major_axis_change)
# # eccentricity = simulation_results[simulation_results['time'] == times[i]]['eccentricity']
# # eccentricity_change = pd.DataFrame(eccentricity.values-initial_states['eccentricity'].values)
# # line2.set_data(initial_states['eccentricity'],eccentricity_change)
# # return line1, line2
# # # Generate and save animation.
# # animation_data = animation.FuncAnimation(fig,animate,init_func=init,blit=False,frames=len(times))
# # animation_path = output_path_prefix + "test.mp4"
# # animation_data.save(animation_path,fps=50,bitrate=6000)
# # print ("Animation generated successfully!")
# # print ("")
# # if config['show_figures']:
# # plt.show()
# Stop timer
end_time = time.time()
# Closing banner (66-character-wide rule, matching the opening banner).
_EXIT_RULE = "-" * 66
print("")
print(_EXIT_RULE)
print("Exited successfully!".center(66))
print(_EXIT_RULE)
print("")
# Report elapsed wall-clock time; str.format() already returns a str, so
# the original str(...) wrapper was redundant and has been removed.
print("(Script time: " + "{:,g}".format(end_time - start_time) + "s)")
print("")
|
|
from core import dates
from db.db_manager import db_sync_manager
from delegate.geni.v3.rm_adaptor import AdaptorFactory
from extensions.sfa.util import xrn
from handler.geni.v3 import exceptions as geni_ex
from rspecs.commons import validate
import core
logger = core.log.getLogger("common-utils")
import random
class CommonUtils(object):
    """Static helpers shared by the GENIv3 delegate: RSpec inspection,
    random value generation, sliver date conversion, RSpec validation
    and adaptor-backed resource-manager (RM) operations.
    """

    @staticmethod
    def is_explicit_tn_allocation_orig(rspec):
        """Legacy TN check: the allocation is explicitly TN when the
        RSpec carries neither an SDN (OpenFlow) sliver nor explicit SE
        resources.
        @param rspec parsed request RSpec
        @return True for an explicit TN allocation
        """
        # Check for SDN resources
        sliver = rspec.of_sliver()
        if sliver is not None:
            return False
        if CommonUtils.is_explicit_se_allocation(rspec):
            return False
        logger.info("This is an explicit TN allocation")
        return True

    @staticmethod
    def is_explicit_tn_allocation(rspec):
        """Return True when the RSpec explicitly contains TN nodes or links.
        @param rspec parsed request RSpec
        """
        nodes_exist = False
        # Check for TN resources; RSpecs without TN accessors raise here
        # and are treated as carrying no TN resources (best-effort).
        try:
            nodes = rspec.tn_nodes()
            links = rspec.tn_links()
            if ((len(nodes) > 0) or (len(links) > 0)):
                nodes_exist = True
                logger.info("This is an explicit TN allocation")
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            pass
        return nodes_exist

    @staticmethod
    def is_explicit_se_allocation(rspec):
        """Return True when the RSpec explicitly contains SE nodes or links.
        @param rspec parsed request RSpec
        """
        nodes_exist = False
        # Check for SE resources (same best-effort pattern as for TN).
        try:
            nodes = rspec.se_nodes()
            links = rspec.se_links()
            if ((len(nodes) > 0) or (len(links) > 0)):
                nodes_exist = True
                logger.info("This is an explicit SE allocation")
        except Exception:
            pass
        return nodes_exist

    @staticmethod
    def is_implicit_allocation(rspec):
        """Return True unless the RSpec carries BOTH explicit TN and
        explicit SE resources.
        NOTE(review): the original comment ("Ensure TN and SE resources
        are not present") reads like `not (tn or se)`, but the code
        implements `not (tn and se)`. Behavior is kept unchanged here --
        confirm the intended semantics.
        """
        return not(CommonUtils.is_explicit_tn_allocation(rspec)
                   and CommonUtils.is_explicit_se_allocation(rspec))

    @staticmethod
    def is_virtual_links(rspec):
        """Return True when the RSpec contains at least one virtual link."""
        try:
            # Check for virtual links; RSpecs without VL accessors are
            # treated as having none.
            vlinks = rspec.vl_links()
        except Exception:
            vlinks = []
        if len(vlinks) > 0:
            logger.info("This is an allocation with virtual links")
            return True
        return False

    @staticmethod
    def get_random_list_position(list_length):
        """Return a random valid index for a list of the given length
        (0 for an empty list).
        @param list_length list length (int or int-convertible)
        """
        last_index = int(list_length)-1 if int(list_length) > 0 else 0
        return random.randint(0, last_index)

    @staticmethod
    def get_random_range_value(start, end):
        """Return a random integer within the inclusive range [start, end].
        Uses range() rather than the Python2-only xrange() so the helper
        also runs under Python 3; len() and indexing work on both.
        """
        rnd_range = range(int(start), int(end)+1)
        rnd_idx = CommonUtils.get_random_list_position(len(rnd_range))
        return rnd_range[rnd_idx]

    @staticmethod
    def process_range_and_set_values(values):
        """Expand a comma-separated list of values and "N-M" ranges into
        a flat list of ints, e.g. "10,20-22" -> [10, 20, 21, 22].
        @param values string with comma-separated values and/or ranges
        @return list of ints (empty list on parse failure)
        """
        new_values = []
        try:
            values_ranges = values.split(",")
            logger.debug("Parsing list of ranges of VLANs: %s" % values_ranges)
            values_ranges = [r.split("-") for r in values_ranges]
            for value_range in values_ranges:
                if len(value_range) == 2:
                    # Expand "N-M" into N..M inclusive; range() replaces
                    # the Python2-only xrange() used originally.
                    value_range = range(
                        int(value_range[0]), int(value_range[1])+1)
                new_values.extend(value_range)
            # List comprehension keeps the Python2 semantics (a list);
            # map() would return a lazy iterator on Python 3.
            new_values = [int(x) for x in new_values]
        except Exception as e:
            logger.warning("Could not generate range of available values. \
                Details: %s" % e)
        return new_values

    @staticmethod
    def fetch_user_name_from_geni_users(geni_users):
        """
        Given the GENI 'geni_users' structure, retrieves the proper
        client or user identifier (may be a name, hrn or urn).
        @param geni_users geni_users structure, passed from handler
        @return user identifier (hrn string), or None when no users given
        """
        client_urn = None
        if len(geni_users) >= 1:
            # Any entry could be used; the first one is taken and its URN
            # converted to an hrn with escape backslashes stripped.
            client_urn = xrn.urn_to_hrn(
                geni_users[0]["urn"])[0].replace("\\", "")
        return client_urn

    @staticmethod
    def convert_sliver_dates_to_datetime(geni_slivers,
                                         geni_expires_value=None):
        """
        Given the GENI slivers structure, converts every 'geni_expires'
        field inside (in rfc3339 format) to a datetime object. This is
        the expected output for CLI clients (e.g. OMNI).
        @param geni_slivers slivers structure, generated in delegate
        @param geni_expires_value valid rfc3339 date; takes precedence
               over each sliver's own value when provided
        @return geni_slivers slivers structure, with date format modified
        """
        for s in geni_slivers:
            # The 'geni_expires_value' has precedence over the current value
            geni_expires = geni_expires_value or s["geni_expires"]
            if geni_expires is not None:
                s["geni_expires"] = dates.rfc3339_to_datetime(geni_expires)
        logger.debug("RO-Slivers(%d)=%s" % (len(geni_slivers), geni_slivers))
        return geni_slivers

    @staticmethod
    def validate_rspec(rspec):
        """
        Given an RSpec (XML structure), this method validates the
        structure of the document, according to the GENI resource schemas.
        @param rspec RSpec defining resources
        @throws GENIv3GeneralError when RSpec format is invalid
        """
        (result, error) = validate(rspec)
        if result is not True:
            m = "RSpec validation failure: %s" % (error,)
            raise geni_ex.GENIv3GeneralError(m)
        logger.info("Validation success!")

    @staticmethod
    def send_request_allocate_rspec(routing_key, req_rspec, slice_urn,
                                    credentials, end_time):
        """Look up the peer RM for the routing key and forward an
        allocate request to it.
        @return the adaptor's allocate() result
        """
        peer = db_sync_manager.get_configured_peer_by_routing_key(routing_key)
        logger.debug("Peer=%s" % (peer,))
        adaptor, uri = AdaptorFactory.create_from_db(peer)
        logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
        return adaptor.allocate(
            slice_urn, credentials, str(req_rspec), end_time)

    @staticmethod
    def extend_slivers(values, routing_key, slivers, db_slivers):
        """Append the returned slivers to `slivers` and record their
        (urn, routing_key) pairs in `db_slivers` (both mutated in place).
        """
        logger.info("Slivers=%s" % (values,))
        slivers.extend(values)
        for dbs in values:
            db_slivers.append({"geni_sliver_urn": dbs.get("geni_sliver_urn"),
                               "routing_key": routing_key})

    @staticmethod
    def manage_renew(peer, urns, creds, etime, beffort):
        """Forward a renew request to the peer RM.
        On failure: best-effort mode logs and returns []; otherwise the
        exception is re-raised.
        """
        try:
            adaptor, uri = AdaptorFactory.create_from_db(peer)
            logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
            return adaptor.renew(urns, creds, etime, beffort)
        except Exception as e:
            if beffort:
                logger.error("manage_renew exception: %s", e)
                return []
            else:
                logger.critical("manage_renew exception: %s", e)
                raise e

    @staticmethod
    def manage_status(peer, urns, creds):
        """Forward a status request to the peer RM; always best-effort
        (failures are logged and [] is returned).
        """
        try:
            adaptor, uri = AdaptorFactory.create_from_db(peer)
            logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
            return adaptor.status(urns, creds)
        except Exception as e:
            logger.error("manage_status exception: %s", e)
            return []

    @staticmethod
    def manage_operational_action(peer, urns, creds, action, beffort):
        """Forward an operational action to the peer RM.
        In best-effort mode, failures of non-standard actions are logged
        and [] returned; failures of the three standard actions
        (geni_start/stop/restart) are re-raised.
        """
        try:
            adaptor, uri = AdaptorFactory.create_from_db(peer)
            logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
            return adaptor.perform_operational_action(
                urns, creds, action, beffort)
        except Exception as e:
            # It is possible that some RMs do not implement particular actions
            # e.g. "geni_update_users", etc.
            # http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/
            # CommonConcepts#SliverOperationalActions
            if beffort:
                if action not in ["geni_start", "geni_stop", "geni_restart"]:
                    raise e
                else:
                    logger.error("manage_operational_action exception: " +
                                 "action=%s, details: %s" % (action, e))
                    return []
            else:
                logger.critical("manage_operational_action exception: " +
                                "action=%s, details: %s" % (action, e))
                raise e

    @staticmethod
    def manage_delete(peer, urns, creds, beffort):
        """Forward a delete request to the peer RM.
        On failure: best-effort mode logs and returns []; otherwise the
        exception is re-raised.
        """
        try:
            adaptor, uri = AdaptorFactory.create_from_db(peer)
            logger.debug("Adaptor=%s, uri=%s" % (adaptor, uri))
            return adaptor.delete(urns, creds, beffort)
        except Exception as e:
            if beffort:
                logger.error("manage_delete exception: %s" % (e,))
                return []
            else:
                logger.critical("manage_delete exception: %s" % (e,))
                raise e
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.